aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.mailmap2
-rw-r--r--Documentation/ABI/stable/sysfs-acpi-pmprofile22
-rw-r--r--Documentation/DocBook/drm.tmpl308
-rw-r--r--Documentation/DocBook/mtdnand.tmpl19
-rw-r--r--Documentation/cgroups/freezer-subsystem.txt4
-rw-r--r--Documentation/devicetree/bindings/mtd/atmel-dataflash.txt14
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt1
-rw-r--r--Kbuild2
-rw-r--r--MAINTAINERS15
-rw-r--r--Makefile6
-rw-r--r--arch/arm/boot/dts/tegra-ventana.dts3
-rw-r--r--arch/arm/mach-at91/at91cap9.c4
-rw-r--r--arch/arm/mach-at91/at91cap9_devices.c13
-rw-r--r--arch/arm/mach-at91/at91rm9200_devices.c11
-rw-r--r--arch/arm/mach-at91/at91sam9260.c4
-rw-r--r--arch/arm/mach-at91/at91sam9260_devices.c11
-rw-r--r--arch/arm/mach-at91/at91sam9261_devices.c5
-rw-r--r--arch/arm/mach-at91/at91sam9263.c4
-rw-r--r--arch/arm/mach-at91/at91sam9263_devices.c11
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c4
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c13
-rw-r--r--arch/arm/mach-at91/at91sam9rl_devices.c7
-rw-r--r--arch/arm/mach-at91/board-1arm.c2
-rw-r--r--arch/arm/mach-at91/board-afeb-9260v1.c11
-rw-r--r--arch/arm/mach-at91/board-cam60.c11
-rw-r--r--arch/arm/mach-at91/board-cap9adk.c11
-rw-r--r--arch/arm/mach-at91/board-carmeva.c2
-rw-r--r--arch/arm/mach-at91/board-cpu9krea.c2
-rw-r--r--arch/arm/mach-at91/board-cpuat91.c2
-rw-r--r--arch/arm/mach-at91/board-csb337.c2
-rw-r--r--arch/arm/mach-at91/board-csb637.c2
-rw-r--r--arch/arm/mach-at91/board-eb9200.c2
-rw-r--r--arch/arm/mach-at91/board-ecbat91.c2
-rw-r--r--arch/arm/mach-at91/board-eco920.c2
-rw-r--r--arch/arm/mach-at91/board-foxg20.c2
-rw-r--r--arch/arm/mach-at91/board-gsia18s.c2
-rw-r--r--arch/arm/mach-at91/board-kafa.c2
-rw-r--r--arch/arm/mach-at91/board-kb9202.c11
-rw-r--r--arch/arm/mach-at91/board-neocore926.c11
-rw-r--r--arch/arm/mach-at91/board-pcontrol-g20.c2
-rw-r--r--arch/arm/mach-at91/board-picotux200.c2
-rw-r--r--arch/arm/mach-at91/board-qil-a9260.c11
-rw-r--r--arch/arm/mach-at91/board-rm9200dk.c11
-rw-r--r--arch/arm/mach-at91/board-rm9200ek.c2
-rw-r--r--arch/arm/mach-at91/board-rsi-ews.c2
-rw-r--r--arch/arm/mach-at91/board-sam9-l9260.c11
-rw-r--r--arch/arm/mach-at91/board-sam9260ek.c11
-rw-r--r--arch/arm/mach-at91/board-sam9261ek.c9
-rw-r--r--arch/arm/mach-at91/board-sam9263ek.c11
-rw-r--r--arch/arm/mach-at91/board-sam9g20ek.c11
-rw-r--r--arch/arm/mach-at91/board-sam9m10g45ek.c11
-rw-r--r--arch/arm/mach-at91/board-sam9rlek.c9
-rw-r--r--arch/arm/mach-at91/board-snapper9260.c12
-rw-r--r--arch/arm/mach-at91/board-stamp9g20.c2
-rw-r--r--arch/arm/mach-at91/board-usb-a926x.c11
-rw-r--r--arch/arm/mach-at91/board-yl-9200.c13
-rw-r--r--arch/arm/mach-at91/cpuidle.c41
-rw-r--r--arch/arm/mach-at91/include/mach/board.h17
-rw-r--r--arch/arm/mach-at91/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-dm355-leopard.c2
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-mityomapl138.c3
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c2
-rw-r--r--arch/arm/mach-davinci/board-tnetv107x-evm.c2
-rw-r--r--arch/arm/mach-davinci/cpuidle.c51
-rw-r--r--arch/arm/mach-davinci/include/mach/nand.h4
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c23
-rw-r--r--arch/arm/mach-exynos/cpuidle.c30
-rw-r--r--arch/arm/mach-imx/Makefile.boot34
-rw-r--r--arch/arm/mach-imx/clock-imx6q.c17
-rw-r--r--arch/arm/mach-kirkwood/cpuidle.c42
-rw-r--r--arch/arm/mach-mmp/aspenite.c5
-rw-r--r--arch/arm/mach-msm/Makefile2
-rw-r--r--arch/arm/mach-msm/board-msm7x30.c4
-rw-r--r--arch/arm/mach-msm/board-msm8960.c4
-rw-r--r--arch/arm/mach-msm/board-msm8x60.c4
-rw-r--r--arch/arm/mach-msm/scm.c3
-rw-r--r--arch/arm/mach-mx5/clock-mx51-mx53.c6
-rw-r--r--arch/arm/mach-mxs/mach-mx28evk.c4
-rw-r--r--arch/arm/mach-omap1/board-palmz71.c1
-rw-r--r--arch/arm/mach-omap1/pm.c2
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c1
-rw-r--r--arch/arm/mach-omap2/board-generic.c8
-rw-r--r--arch/arm/mach-omap2/board-h4.c122
-rw-r--r--arch/arm/mach-omap2/clkt_dpll.c51
-rw-r--r--arch/arm/mach-omap2/clock.h2
-rw-r--r--arch/arm/mach-omap2/clock2420_data.c12
-rw-r--r--arch/arm/mach-omap2/clock2430_data.c12
-rw-r--r--arch/arm/mach-omap2/clock3xxx_data.c12
-rw-r--r--arch/arm/mach-omap2/clock44xx.h7
-rw-r--r--arch/arm/mach-omap2/clock44xx_data.c43
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c133
-rw-r--r--arch/arm/mach-omap2/devices.c17
-rw-r--r--arch/arm/mach-omap2/dpll3xxx.c9
-rw-r--r--arch/arm/mach-omap2/dpll44xx.c69
-rw-r--r--arch/arm/mach-omap2/dsp.c1
-rw-r--r--arch/arm/mach-omap2/hsmmc.c16
-rw-r--r--arch/arm/mach-omap2/id.c5
-rw-r--r--arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h8
-rw-r--r--arch/arm/mach-omap2/io.c6
-rw-r--r--arch/arm/mach-omap2/mailbox.c1
-rw-r--r--arch/arm/mach-omap2/omap-iommu.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c17
-rw-r--r--arch/arm/mach-omap2/omap_l3_noc.c1
-rw-r--r--arch/arm/mach-omap2/pm34xx.c70
-rw-r--r--arch/arm/mach-omap2/powerdomain.c14
-rw-r--r--arch/arm/mach-omap2/powerdomain.h2
-rw-r--r--arch/arm/mach-omap2/smartreflex.c1
-rw-r--r--arch/arm/mach-omap2/timer.c12
-rw-r--r--arch/arm/mach-omap2/usb-musb.c38
-rw-r--r--arch/arm/mach-orion5x/ts78xx-setup.c2
-rw-r--r--arch/arm/mach-picoxcell/include/mach/debug-macro.S2
-rw-r--r--arch/arm/mach-pxa/cm-x300.c5
-rw-r--r--arch/arm/mach-pxa/colibri-pxa3xx.c5
-rw-r--r--arch/arm/mach-pxa/littleton.c5
-rw-r--r--arch/arm/mach-pxa/mxm8x10.c9
-rw-r--r--arch/arm/mach-pxa/raumfeld.c5
-rw-r--r--arch/arm/mach-pxa/zylonite.c5
-rw-r--r--arch/arm/mach-shmobile/Makefile2
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c16
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c2
-rw-r--r--arch/arm/mach-shmobile/board-kota2.c7
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c8
-rw-r--r--arch/arm/mach-shmobile/cpuidle.c52
-rw-r--r--arch/arm/mach-shmobile/include/mach/common.h4
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh73a0.h8
-rw-r--r--arch/arm/mach-shmobile/pfc-sh7367.c122
-rw-r--r--arch/arm/mach-shmobile/pfc-sh7372.c262
-rw-r--r--arch/arm/mach-shmobile/pfc-sh7377.c159
-rw-r--r--arch/arm/mach-shmobile/pfc-sh73a0.c193
-rw-r--r--arch/arm/mach-shmobile/pm-sh7372.c14
-rw-r--r--arch/arm/mach-tegra/board-dt.c13
-rw-r--r--arch/arm/mach-tegra/board-harmony-pinmux.c6
-rw-r--r--arch/arm/mach-tegra/board-paz00-pinmux.c6
-rw-r--r--arch/arm/mach-tegra/board-seaboard-pinmux.c6
-rw-r--r--arch/arm/mach-tegra/board-trimslice-pinmux.c5
-rw-r--r--arch/arm/plat-mxc/Kconfig4
-rw-r--r--arch/arm/plat-mxc/avic.c1
-rw-r--r--arch/arm/plat-mxc/gic.c11
-rw-r--r--arch/arm/plat-mxc/include/mach/entry-macro.S3
-rw-r--r--arch/arm/plat-mxc/tzic.c1
-rw-r--r--arch/arm/plat-omap/dmtimer.c1
-rw-r--r--arch/arm/plat-omap/i2c.c2
-rw-r--r--arch/arm/plat-omap/include/plat/cpu.h17
-rw-r--r--arch/arm/plat-omap/include/plat/dmtimer.h4
-rw-r--r--arch/arm/plat-omap/include/plat/omap-alsa.h123
-rw-r--r--arch/arm/plat-omap/include/plat/omap-pm.h4
-rw-r--r--arch/arm/plat-omap/include/plat/omap_device.h2
-rw-r--r--arch/arm/plat-omap/include/plat/omap_hwmod.h2
-rw-r--r--arch/arm/plat-omap/omap-pm-noop.c24
-rw-r--r--arch/arm/plat-omap/omap_device.c3
-rw-r--r--arch/arm/plat-pxa/include/plat/pxa3xx_nand.h20
-rw-r--r--arch/arm/plat-samsung/dma-ops.c1
-rw-r--r--arch/arm/plat-samsung/s3c-dma-ops.c1
-rw-r--r--arch/avr32/boards/atngw100/setup.c10
-rw-r--r--arch/avr32/boards/atstk1000/atstk1002.c11
-rw-r--r--arch/avr32/boards/favr-32/setup.c2
-rw-r--r--arch/avr32/boards/hammerhead/setup.c2
-rw-r--r--arch/avr32/boards/merisc/setup.c2
-rw-r--r--arch/avr32/boards/mimc200/setup.c2
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c8
-rw-r--r--arch/avr32/mach-at32ap/include/mach/board.h10
-rw-r--r--arch/blackfin/include/asm/bfin_serial.h2
-rw-r--r--arch/blackfin/mach-bf518/boards/ezbrd.c14
-rw-r--r--arch/blackfin/mach-bf518/boards/tcm-bf518.c14
-rw-r--r--arch/blackfin/mach-bf527/boards/ad7160eval.c14
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c16
-rw-r--r--arch/blackfin/mach-bf527/boards/ezbrd.c16
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c14
-rw-r--r--arch/blackfin/mach-bf527/boards/tll6527m.c14
-rw-r--r--arch/blackfin/mach-bf533/boards/H8606.c7
-rw-r--r--arch/blackfin/mach-bf533/boards/blackstamp.c7
-rw-r--r--arch/blackfin/mach-bf533/boards/cm_bf533.c7
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c7
-rw-r--r--arch/blackfin/mach-bf533/boards/ip0x.c7
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c7
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537e.c16
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537u.c16
-rw-r--r--arch/blackfin/mach-bf537/boards/dnp5370.c16
-rw-r--r--arch/blackfin/mach-bf537/boards/minotaur.c14
-rw-r--r--arch/blackfin/mach-bf537/boards/pnav10.c15
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c16
-rw-r--r--arch/blackfin/mach-bf537/boards/tcm_bf537.c16
-rw-r--r--arch/blackfin/mach-bf538/boards/ezkit.c21
-rw-r--r--arch/blackfin/mach-bf548/boards/cm_bf548.c28
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c28
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c7
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c7
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c7
-rw-r--r--arch/blackfin/mach-bf561/boards/tepla.c7
-rw-r--r--arch/cris/arch-v32/drivers/mach-a3/nandflash.c2
-rw-r--r--arch/cris/arch-v32/drivers/mach-fs/nandflash.c2
-rw-r--r--arch/m68k/Kconfig4
-rw-r--r--arch/m68k/Kconfig.bus9
-rw-r--r--arch/m68k/Kconfig.devices31
-rw-r--r--arch/m68k/amiga/amiints.c168
-rw-r--r--arch/m68k/amiga/cia.c39
-rw-r--r--arch/m68k/apollo/dn_ints.c35
-rw-r--r--arch/m68k/atari/ataints.c274
-rw-r--r--arch/m68k/bvme6000/config.c2
-rw-r--r--arch/m68k/hp300/time.c2
-rw-r--r--arch/m68k/include/asm/hardirq.h5
-rw-r--r--arch/m68k/include/asm/irq.h69
-rw-r--r--arch/m68k/include/asm/macintosh.h2
-rw-r--r--arch/m68k/include/asm/q40ints.h3
-rw-r--r--arch/m68k/kernel/Makefile9
-rw-r--r--arch/m68k/kernel/entry_mm.S7
-rw-r--r--arch/m68k/kernel/ints.c323
-rw-r--r--arch/m68k/mac/baboon.c21
-rw-r--r--arch/m68k/mac/iop.c10
-rw-r--r--arch/m68k/mac/macints.c24
-rw-r--r--arch/m68k/mac/oss.c54
-rw-r--r--arch/m68k/mac/psc.c49
-rw-r--r--arch/m68k/mac/via.c74
-rw-r--r--arch/m68k/mvme147/config.c5
-rw-r--r--arch/m68k/mvme16x/config.c2
-rw-r--r--arch/m68k/q40/q40ints.c60
-rw-r--r--arch/m68k/sun3/sun3ints.c46
-rw-r--r--arch/mips/Makefile4
-rw-r--r--arch/powerpc/boot/dts/charon.dts236
-rw-r--r--arch/powerpc/configs/52xx/tqm5200_defconfig20
-rw-r--r--arch/powerpc/configs/ppc64_defconfig4
-rw-r--r--arch/powerpc/configs/pseries_defconfig4
-rw-r--r--arch/powerpc/include/asm/floppy.h4
-rw-r--r--arch/powerpc/include/asm/lv1call.h2
-rw-r--r--arch/powerpc/include/asm/xics.h4
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S6
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S2
-rw-r--r--arch/powerpc/mm/mem.c30
-rw-r--r--arch/powerpc/mm/numa.c24
-rw-r--r--arch/powerpc/platforms/52xx/mpc5200_simple.c1
-rw-r--r--arch/powerpc/platforms/cell/beat.c2
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_pciex.c2
-rw-r--r--arch/powerpc/platforms/cell/iommu.c3
-rw-r--r--arch/powerpc/platforms/cell/pmu.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c9
-rw-r--r--arch/powerpc/platforms/powermac/pic.c1
-rw-r--r--arch/powerpc/platforms/powermac/smp.c4
-rw-r--r--arch/powerpc/platforms/ps3/device-init.c2
-rw-r--r--arch/powerpc/platforms/ps3/repository.c32
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c2
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/powerpc/sysdev/ppc4xx_soc.c2
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c5
-rw-r--r--arch/sh/include/asm/page.h5
-rw-r--r--arch/sh/include/asm/unistd_32.h4
-rw-r--r--arch/sh/include/asm/unistd_64.h4
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7203.c16
-rw-r--r--arch/sh/kernel/cpu/shmobile/cpuidle.c28
-rw-r--r--arch/sh/kernel/syscalls_32.S2
-rw-r--r--arch/sh/kernel/syscalls_64.S2
-rw-r--r--arch/sparc/include/asm/unistd.h4
-rw-r--r--arch/sparc/kernel/systbls_32.S2
-rw-r--r--arch/sparc/kernel/systbls_64.S4
-rw-r--r--arch/x86/platform/ce4100/ce4100.c2
-rw-r--r--arch/x86/platform/mrst/mrst.c1
-rw-r--r--arch/x86/platform/mrst/pmu.c2
-rw-r--r--arch/x86/platform/mrst/vrtc.c4
-rw-r--r--crypto/ablkcipher.c14
-rw-r--r--crypto/aead.c14
-rw-r--r--crypto/ahash.c7
-rw-r--r--crypto/blkcipher.c7
-rw-r--r--crypto/crypto_user.c3
-rw-r--r--crypto/pcompress.c7
-rw-r--r--crypto/rng.c7
-rw-r--r--crypto/shash.c7
-rw-r--r--drivers/acpi/acpica/hwregs.c11
-rw-r--r--drivers/acpi/atomicio.c2
-rw-r--r--drivers/acpi/bus.c8
-rw-r--r--drivers/acpi/processor_driver.c20
-rw-r--r--drivers/acpi/processor_idle.c254
-rw-r--r--drivers/acpi/scan.c3
-rw-r--r--drivers/acpi/sysfs.c14
-rw-r--r--drivers/ata/ahci.c3
-rw-r--r--drivers/ata/ahci_platform.c4
-rw-r--r--drivers/ata/libata-eh.c12
-rw-r--r--drivers/ata/libata-pmp.c7
-rw-r--r--drivers/ata/libata-scsi.c4
-rw-r--r--drivers/ata/pata_of_platform.c2
-rw-r--r--drivers/ata/sata_sis.c2
-rw-r--r--drivers/base/power/opp.c2
-rw-r--r--drivers/bluetooth/ath3k.c4
-rw-r--r--drivers/bluetooth/bcm203x.c12
-rw-r--r--drivers/bluetooth/bfusb.c13
-rw-r--r--drivers/char/agp/intel-gtt.c7
-rw-r--r--drivers/cpufreq/db8500-cpufreq.c6
-rw-r--r--drivers/cpuidle/cpuidle.c86
-rw-r--r--drivers/cpuidle/driver.c25
-rw-r--r--drivers/cpuidle/governors/ladder.c41
-rw-r--r--drivers/cpuidle/governors/menu.c29
-rw-r--r--drivers/cpuidle/sysfs.c22
-rw-r--r--drivers/gpio/gpio-omap.c59
-rw-r--r--drivers/gpio/gpio-pca953x.c11
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/drm_crtc.c12
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c13
-rw-r--r--drivers/gpu/drm/drm_debugfs.c12
-rw-r--r--drivers/gpu/drm/drm_drv.c2
-rw-r--r--drivers/gpu/drm/drm_irq.c22
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_grctx.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c16
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c62
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c22
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c2369
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c71
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c20
-rw-r--r--drivers/gpu/drm/radeon/ni.c25
-rw-r--r--drivers/gpu/drm/radeon/r100.c7
-rw-r--r--drivers/gpu/drm/radeon/r300.c16
-rw-r--r--drivers/gpu/drm/radeon/r600.c224
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon.h107
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c118
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c90
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c36
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c2151
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c71
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c60
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h14
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c18
-rw-r--r--drivers/gpu/drm/radeon/rs400.c5
-rw-r--r--drivers/gpu/drm/radeon/rs600.c17
-rw-r--r--drivers/gpu/drm/radeon/rv770.c73
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c165
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c94
-rw-r--r--drivers/hwspinlock/u8500_hsem.c7
-rw-r--r--drivers/ide/ide-cd.c1
-rw-r--r--drivers/ide/ide-floppy.c1
-rw-r--r--drivers/ide/ide-tape.c1
-rw-r--r--drivers/idle/intel_idle.c130
-rw-r--r--drivers/iommu/omap-iommu-debug.c1
-rw-r--r--drivers/iommu/omap-iovmm.c1
-rw-r--r--drivers/macintosh/via-macii.c2
-rw-r--r--drivers/macintosh/via-maciisi.c4
-rw-r--r--drivers/md/dm-bufio.c1
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c2
-rw-r--r--drivers/md/persistent-data/dm-btree.c2
-rw-r--r--drivers/md/persistent-data/dm-space-map-checker.c1
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c2
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c2
-rw-r--r--drivers/md/raid5.c16
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-i2c.c3
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-phy.c7
-rw-r--r--drivers/media/video/s5k6aa.c1
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.c4
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c4
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c6
-rw-r--r--drivers/media/video/v4l2-ctrls.c5
-rw-r--r--drivers/media/video/v4l2-event.c10
-rw-r--r--drivers/media/video/videobuf2-core.c6
-rw-r--r--drivers/mfd/ab5500-core.c1
-rw-r--r--drivers/mfd/ab5500-debugfs.c1
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c8
-rw-r--r--drivers/mtd/Kconfig21
-rw-r--r--drivers/mtd/Makefile2
-rw-r--r--drivers/mtd/afs.c4
-rw-r--r--drivers/mtd/ar7part.c2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c31
-rw-r--r--drivers/mtd/chips/fwh_lock.h3
-rw-r--r--drivers/mtd/chips/jedec_probe.c34
-rw-r--r--drivers/mtd/cmdlinepart.c7
-rw-r--r--drivers/mtd/devices/Kconfig13
-rw-r--r--drivers/mtd/devices/Makefile3
-rw-r--r--drivers/mtd/devices/doc2000.c17
-rw-r--r--drivers/mtd/devices/doc2001.c11
-rw-r--r--drivers/mtd/devices/doc2001plus.c11
-rw-r--r--drivers/mtd/devices/docecc.c2
-rw-r--r--drivers/mtd/devices/docg3.c1114
-rw-r--r--drivers/mtd/devices/docg3.h297
-rw-r--r--drivers/mtd/devices/docprobe.c5
-rw-r--r--drivers/mtd/devices/lart.c18
-rw-r--r--drivers/mtd/devices/m25p80.c92
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c91
-rw-r--r--drivers/mtd/devices/sst25l.c42
-rw-r--r--drivers/mtd/ftl.c40
-rw-r--r--drivers/mtd/inftlcore.c69
-rw-r--r--drivers/mtd/inftlmount.c116
-rw-r--r--drivers/mtd/maps/Kconfig26
-rw-r--r--drivers/mtd/maps/Makefile2
-rw-r--r--drivers/mtd/maps/bcm963xx-flash.c1
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c16
-rw-r--r--drivers/mtd/maps/ceiva.c341
-rw-r--r--drivers/mtd/maps/dc21285.c9
-rw-r--r--drivers/mtd/maps/edb7312.c134
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c16
-rw-r--r--drivers/mtd/maps/h720x-flash.c23
-rw-r--r--drivers/mtd/maps/impa7.c28
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c7
-rw-r--r--drivers/mtd/maps/ixp2000.c11
-rw-r--r--drivers/mtd/maps/ixp4xx.c29
-rw-r--r--drivers/mtd/maps/lantiq-flash.c17
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c24
-rw-r--r--drivers/mtd/maps/pcmciamtd.c124
-rw-r--r--drivers/mtd/maps/physmap.c38
-rw-r--r--drivers/mtd/maps/physmap_of.c80
-rw-r--r--drivers/mtd/maps/plat-ram.c23
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c20
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c24
-rw-r--r--drivers/mtd/maps/sa1100-flash.c30
-rw-r--r--drivers/mtd/maps/solutionengine.c30
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c33
-rw-r--r--drivers/mtd/mtd_blkdevs.c2
-rw-r--r--drivers/mtd/mtdblock.c18
-rw-r--r--drivers/mtd/mtdchar.c162
-rw-r--r--drivers/mtd/mtdconcat.c10
-rw-r--r--drivers/mtd/mtdcore.c70
-rw-r--r--drivers/mtd/mtdcore.h3
-rw-r--r--drivers/mtd/mtdoops.c2
-rw-r--r--drivers/mtd/mtdpart.c62
-rw-r--r--drivers/mtd/mtdsuper.c20
-rw-r--r--drivers/mtd/mtdswap.c31
-rw-r--r--drivers/mtd/nand/Kconfig29
-rw-r--r--drivers/mtd/nand/Makefile2
-rw-r--r--drivers/mtd/nand/atmel_nand.c74
-rw-r--r--drivers/mtd/nand/au1550nd.c29
-rw-r--r--drivers/mtd/nand/autcpu12.c4
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c57
-rw-r--r--drivers/mtd/nand/cafe_nand.c21
-rw-r--r--drivers/mtd/nand/cmx270_nand.c23
-rw-r--r--drivers/mtd/nand/cs553x_nand.c15
-rw-r--r--drivers/mtd/nand/davinci_nand.c39
-rw-r--r--drivers/mtd/nand/denali.c6
-rw-r--r--drivers/mtd/nand/diskonchip.c8
-rw-r--r--drivers/mtd/nand/edb7312.c203
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c75
-rw-r--r--drivers/mtd/nand/fsl_upm.c16
-rw-r--r--drivers/mtd/nand/fsmc_nand.c77
-rw-r--r--drivers/mtd/nand/gpmi-nand/Makefile3
-rw-r--r--drivers/mtd/nand/gpmi-nand/bch-regs.h84
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c1057
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c1619
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h273
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-regs.h172
-rw-r--r--drivers/mtd/nand/h1910.c19
-rw-r--r--drivers/mtd/nand/jz4740_nand.c18
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c22
-rw-r--r--drivers/mtd/nand/mxc_nand.c37
-rw-r--r--drivers/mtd/nand/nand_base.c1109
-rw-r--r--drivers/mtd/nand/nand_bbt.c692
-rw-r--r--drivers/mtd/nand/nand_bch.c4
-rw-r--r--drivers/mtd/nand/nand_ecc.c10
-rw-r--r--drivers/mtd/nand/nandsim.c4
-rw-r--r--drivers/mtd/nand/ndfc.c22
-rw-r--r--drivers/mtd/nand/nomadik_nand.c1
-rw-r--r--drivers/mtd/nand/nuc900_nand.c1
-rw-r--r--drivers/mtd/nand/omap2.c22
-rw-r--r--drivers/mtd/nand/orion_nand.c16
-rw-r--r--drivers/mtd/nand/pasemi_nand.c3
-rw-r--r--drivers/mtd/nand/plat_nand.c25
-rw-r--r--drivers/mtd/nand/ppchameleonevb.c47
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c471
-rw-r--r--drivers/mtd/nand/r852.c6
-rw-r--r--drivers/mtd/nand/rtc_from4.c5
-rw-r--r--drivers/mtd/nand/s3c2410.c27
-rw-r--r--drivers/mtd/nand/sharpsl.c13
-rw-r--r--drivers/mtd/nand/sm_common.c2
-rw-r--r--drivers/mtd/nand/socrates_nand.c28
-rw-r--r--drivers/mtd/nand/tmio_nand.c17
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c8
-rw-r--r--drivers/mtd/nftlcore.c37
-rw-r--r--drivers/mtd/nftlmount.c26
-rw-r--r--drivers/mtd/ofpart.c112
-rw-r--r--drivers/mtd/onenand/generic.c14
-rw-r--r--drivers/mtd/onenand/omap2.c16
-rw-r--r--drivers/mtd/onenand/onenand_base.c114
-rw-r--r--drivers/mtd/onenand/onenand_bbt.c8
-rw-r--r--drivers/mtd/onenand/samsung.c13
-rw-r--r--drivers/mtd/redboot.c16
-rw-r--r--drivers/mtd/sm_ftl.c26
-rw-r--r--drivers/mtd/ssfdc.c46
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c33
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c37
-rw-r--r--drivers/mtd/tests/mtd_readtest.c13
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c17
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c11
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c17
-rw-r--r--drivers/mtd/tests/mtd_torturetest.c11
-rw-r--r--drivers/mtd/ubi/eba.c2
-rw-r--r--drivers/mtd/ubi/io.c24
-rw-r--r--drivers/mtd/ubi/kapi.c2
-rw-r--r--drivers/mtd/ubi/misc.c2
-rw-r--r--drivers/mtd/ubi/scan.c4
-rw-r--r--drivers/mtd/ubi/vtbl.c2
-rw-r--r--drivers/net/bonding/bond_main.c4
-rw-r--r--drivers/net/bonding/bond_procfs.c4
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c195
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h21
-rw-r--r--drivers/net/ethernet/cadence/Kconfig16
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c3
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.h4
-rw-r--r--drivers/net/ethernet/cadence/macb.c344
-rw-r--r--drivers/net/ethernet/cadence/macb.h150
-rw-r--r--drivers/net/ethernet/freescale/Kconfig3
-rw-r--r--drivers/net/ethernet/intel/Kconfig6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c11
-rw-r--r--drivers/net/ethernet/natsemi/Kconfig5
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c88
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/usb/usbnet.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h34
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c11
-rw-r--r--drivers/net/wireless/b43/xmit.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c12
-rw-r--r--drivers/net/wireless/libertas/cfg.c25
-rw-r--r--drivers/net/wireless/libertas/cfg.h1
-rw-r--r--drivers/net/wireless/libertas/main.c6
-rw-r--r--drivers/pinctrl/Kconfig22
-rw-r--r--drivers/platform/x86/Kconfig4
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c62
-rw-r--r--drivers/ps3/ps3-vuart.c2
-rw-r--r--drivers/ps3/ps3stor_lib.c2
-rw-r--r--drivers/rtc/rtc-mrst.c19
-rw-r--r--drivers/sh/Makefile8
-rw-r--r--drivers/sh/clk/core.c107
-rw-r--r--drivers/sh/pm_runtime.c (renamed from arch/arm/mach-shmobile/pm_runtime.c)4
-rw-r--r--drivers/spi/spi-atmel.c5
-rw-r--r--drivers/staging/spectra/lld_mtd.c6
-rw-r--r--drivers/thermal/thermal_sys.c4
-rw-r--r--drivers/tty/n_gsm.c12
-rw-r--r--drivers/tty/serial/sh-sci.c19
-rw-r--r--drivers/virtio/virtio_pci.c11
-rw-r--r--fs/btrfs/btrfs_inode.h4
-rw-r--r--fs/btrfs/delayed-inode.c58
-rw-r--r--fs/btrfs/disk-io.c42
-rw-r--r--fs/btrfs/extent-tree.c50
-rw-r--r--fs/btrfs/free-space-cache.c17
-rw-r--r--fs/btrfs/inode-map.c28
-rw-r--r--fs/btrfs/inode.c84
-rw-r--r--fs/btrfs/relocation.c2
-rw-r--r--fs/btrfs/scrub.c64
-rw-r--r--fs/btrfs/super.c49
-rw-r--r--fs/btrfs/transaction.c4
-rw-r--r--fs/btrfs/volumes.c5
-rw-r--r--fs/cifs/file.c105
-rw-r--r--fs/dcache.c6
-rw-r--r--fs/hfs/trans.c2
-rw-r--r--fs/jffs2/compr.c128
-rw-r--r--fs/jffs2/compr.h2
-rw-r--r--fs/jffs2/fs.c2
-rw-r--r--fs/jffs2/jffs2_fs_sb.h6
-rw-r--r--fs/jffs2/os-linux.h2
-rw-r--r--fs/jffs2/scan.c4
-rw-r--r--fs/jffs2/super.c119
-rw-r--r--fs/jffs2/wbuf.c9
-rw-r--r--fs/namei.c16
-rw-r--r--fs/proc/base.c146
-rw-r--r--fs/ubifs/debug.c16
-rw-r--r--fs/ubifs/debug.h5
-rw-r--r--fs/ubifs/recovery.c2
-rw-r--r--fs/ubifs/sb.c2
-rw-r--r--fs/xfs/xfs_aops.c2
-rw-r--r--fs/xfs/xfs_buf_item.c2
-rw-r--r--fs/xfs/xfs_dquot_item.c6
-rw-r--r--fs/xfs/xfs_extfree_item.c4
-rw-r--r--fs/xfs/xfs_inode_item.c2
-rw-r--r--fs/xfs/xfs_log.c2
-rw-r--r--fs/xfs/xfs_log.h2
-rw-r--r--fs/xfs/xfs_trans.h6
-rw-r--r--fs/xfs/xfs_vnodeops.c14
-rw-r--r--include/acpi/acpi_drivers.h2
-rw-r--r--include/acpi/actypes.h1
-rw-r--r--include/acpi/processor.h1
-rw-r--r--include/drm/drmP.h4
-rw-r--r--include/drm/drm_dp_helper.h3
-rw-r--r--include/drm/drm_mode.h12
-rw-r--r--include/drm/exynos_drm.h2
-rw-r--r--include/drm/vmwgfx_drm.h51
-rw-r--r--include/linux/cpuidle.h52
-rw-r--r--include/linux/devfreq.h2
-rw-r--r--include/linux/ethtool.h2
-rw-r--r--include/linux/hwspinlock.h1
-rw-r--r--include/linux/mfd/wm8994/registers.h15
-rw-r--r--include/linux/mtd/bbm.h39
-rw-r--r--include/linux/mtd/mtd.h82
-rw-r--r--include/linux/mtd/nand.h92
-rw-r--r--include/linux/mtd/onenand.h4
-rw-r--r--include/linux/mtd/partitions.h46
-rw-r--r--include/linux/mtd/physmap.h17
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/pinctrl/pinctrl.h1
-rw-r--r--include/linux/platform_data/macb.h17
-rw-r--r--include/linux/serial_sci.h1
-rw-r--r--include/linux/sh_clk.h4
-rw-r--r--include/linux/sh_pfc.h76
-rw-r--r--include/mtd/mtd-abi.h122
-rw-r--r--include/net/bluetooth/rfcomm.h1
-rw-r--r--include/net/mac80211.h3
-rw-r--r--include/net/netlink.h11
-rw-r--r--kernel/power/qos.c1
-rw-r--r--lib/nlattr.c1
-rw-r--r--mm/page-writeback.c8
-rw-r--r--net/bluetooth/hci_core.c2
-rw-r--r--net/bluetooth/mgmt.c2
-rw-r--r--net/bluetooth/rfcomm/core.c9
-rw-r--r--net/mac80211/cfg.c12
-rw-r--r--net/mac80211/ieee80211_i.h1
-rw-r--r--net/mac80211/mlme.c18
-rw-r--r--net/mac80211/work.c7
-rw-r--r--net/wanrouter/wanproc.c2
-rw-r--r--sound/core/vmaster.c18
-rw-r--r--sound/pci/hda/hda_codec.c64
-rw-r--r--sound/pci/hda/hda_codec.h1
-rw-r--r--sound/pci/hda/hda_local.h16
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_realtek.c13
-rw-r--r--sound/pci/hda/patch_sigmatel.c11
-rw-r--r--sound/pci/intel8x0.c58
-rw-r--r--sound/ppc/snd_ps3.c2
-rw-r--r--sound/soc/codecs/wm8994.c43
-rw-r--r--sound/usb/mixer.c110
-rw-r--r--sound/usb/quirks.c7
-rw-r--r--tools/perf/builtin-record.c13
-rw-r--r--tools/perf/builtin-stat.c20
-rw-r--r--tools/perf/builtin-test.c6
-rw-r--r--tools/perf/builtin-top.c54
-rw-r--r--tools/perf/util/annotate.c9
-rw-r--r--tools/perf/util/debug.c7
-rw-r--r--tools/perf/util/debug.h17
-rw-r--r--tools/perf/util/evlist.c30
-rw-r--r--tools/perf/util/evlist.h2
-rw-r--r--tools/perf/util/evsel.c43
-rw-r--r--tools/perf/util/evsel.h10
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/hist.c3
-rw-r--r--tools/perf/util/hist.h1
-rw-r--r--tools/perf/util/python.c31
-rw-r--r--tools/perf/util/session.c46
-rw-r--r--tools/perf/util/session.h1
-rw-r--r--tools/perf/util/top.h1
-rw-r--r--tools/perf/util/trace-event-info.c2
-rw-r--r--tools/perf/util/ui/browser.c151
-rw-r--r--tools/perf/util/ui/browser.h9
-rw-r--r--tools/perf/util/ui/browsers/annotate.c14
-rw-r--r--tools/perf/util/ui/browsers/hists.c74
-rw-r--r--tools/perf/util/ui/helpline.c16
-rw-r--r--tools/perf/util/ui/helpline.h2
-rw-r--r--tools/perf/util/ui/progress.c65
-rw-r--r--tools/perf/util/ui/progress.h7
-rw-r--r--tools/perf/util/ui/setup.c83
-rw-r--r--tools/perf/util/ui/ui.h3
-rw-r--r--tools/perf/util/ui/util.c182
-rw-r--r--tools/perf/util/ui/util.h8
-rw-r--r--tools/power/x86/turbostat/turbostat.c28
-rwxr-xr-xtools/testing/ktest/ktest.pl515
-rw-r--r--tools/testing/ktest/sample.conf146
691 files changed, 16753 insertions, 11135 deletions
diff --git a/.mailmap b/.mailmap
index a4806f0de852..9b0d0267a3c3 100644
--- a/.mailmap
+++ b/.mailmap
@@ -68,6 +68,7 @@ Juha Yrjola <juha.yrjola@solidboot.com>
68Kay Sievers <kay.sievers@vrfy.org> 68Kay Sievers <kay.sievers@vrfy.org>
69Kenneth W Chen <kenneth.w.chen@intel.com> 69Kenneth W Chen <kenneth.w.chen@intel.com>
70Koushik <raghavendra.koushik@neterion.com> 70Koushik <raghavendra.koushik@neterion.com>
71Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
71Leonid I Ananiev <leonid.i.ananiev@intel.com> 72Leonid I Ananiev <leonid.i.ananiev@intel.com>
72Linas Vepstas <linas@austin.ibm.com> 73Linas Vepstas <linas@austin.ibm.com>
73Mark Brown <broonie@sirena.org.uk> 74Mark Brown <broonie@sirena.org.uk>
@@ -111,3 +112,4 @@ Uwe Kleine-König <ukl@pengutronix.de>
111Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com> 112Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
112Valdis Kletnieks <Valdis.Kletnieks@vt.edu> 113Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
113Takashi YOSHII <takashi.yoshii.zj@renesas.com> 114Takashi YOSHII <takashi.yoshii.zj@renesas.com>
115Yusuke Goda <goda.yusuke@renesas.com>
diff --git a/Documentation/ABI/stable/sysfs-acpi-pmprofile b/Documentation/ABI/stable/sysfs-acpi-pmprofile
new file mode 100644
index 000000000000..964c7a8afb26
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-acpi-pmprofile
@@ -0,0 +1,22 @@
1What: /sys/firmware/acpi/pm_profile
2Date: 03-Nov-2011
3KernelVersion: v3.2
4Contact: linux-acpi@vger.kernel.org
5Description: The ACPI pm_profile sysfs interface exports the platform
6 power management (and performance) requirement expectations
7 as provided by BIOS. The integer value is directly passed as
8 retrieved from the FADT ACPI table.
9Values: For possible values see ACPI specification:
10 5.2.9 Fixed ACPI Description Table (FADT)
11 Field: Preferred_PM_Profile
12
13 Currently these values are defined by spec:
14 0 Unspecified
15 1 Desktop
16 2 Mobile
17 3 Workstation
18 4 Enterprise Server
19 5 SOHO Server
20 6 Appliance PC
21 7 Performance Server
22 >7 Reserved
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index c27915893974..196b8b9dba11 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -32,7 +32,7 @@
32 The Linux DRM layer contains code intended to support the needs 32 The Linux DRM layer contains code intended to support the needs
33 of complex graphics devices, usually containing programmable 33 of complex graphics devices, usually containing programmable
34 pipelines well suited to 3D graphics acceleration. Graphics 34 pipelines well suited to 3D graphics acceleration. Graphics
35 drivers in the kernel can make use of DRM functions to make 35 drivers in the kernel may make use of DRM functions to make
36 tasks like memory management, interrupt handling and DMA easier, 36 tasks like memory management, interrupt handling and DMA easier,
37 and provide a uniform interface to applications. 37 and provide a uniform interface to applications.
38 </para> 38 </para>
@@ -57,10 +57,10 @@
57 existing drivers. 57 existing drivers.
58 </para> 58 </para>
59 <para> 59 <para>
60 First, we'll go over some typical driver initialization 60 First, we go over some typical driver initialization
61 requirements, like setting up command buffers, creating an 61 requirements, like setting up command buffers, creating an
62 initial output configuration, and initializing core services. 62 initial output configuration, and initializing core services.
63 Subsequent sections will cover core internals in more detail, 63 Subsequent sections cover core internals in more detail,
64 providing implementation notes and examples. 64 providing implementation notes and examples.
65 </para> 65 </para>
66 <para> 66 <para>
@@ -74,7 +74,7 @@
74 </para> 74 </para>
75 <para> 75 <para>
76 The core of every DRM driver is struct drm_driver. Drivers 76 The core of every DRM driver is struct drm_driver. Drivers
77 will typically statically initialize a drm_driver structure, 77 typically statically initialize a drm_driver structure,
78 then pass it to drm_init() at load time. 78 then pass it to drm_init() at load time.
79 </para> 79 </para>
80 80
@@ -88,8 +88,8 @@
88 </para> 88 </para>
89 <programlisting> 89 <programlisting>
90 static struct drm_driver driver = { 90 static struct drm_driver driver = {
91 /* don't use mtrr's here, the Xserver or user space app should 91 /* Don't use MTRRs here; the Xserver or userspace app should
92 * deal with them for intel hardware. 92 * deal with them for Intel hardware.
93 */ 93 */
94 .driver_features = 94 .driver_features =
95 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | 95 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
@@ -154,8 +154,8 @@
154 </programlisting> 154 </programlisting>
155 <para> 155 <para>
156 In the example above, taken from the i915 DRM driver, the driver 156 In the example above, taken from the i915 DRM driver, the driver
157 sets several flags indicating what core features it supports. 157 sets several flags indicating what core features it supports;
158 We'll go over the individual callbacks in later sections. Since 158 we go over the individual callbacks in later sections. Since
159 flags indicate which features your driver supports to the DRM 159 flags indicate which features your driver supports to the DRM
160 core, you need to set most of them prior to calling drm_init(). Some, 160 core, you need to set most of them prior to calling drm_init(). Some,
161 like DRIVER_MODESET can be set later based on user supplied parameters, 161 like DRIVER_MODESET can be set later based on user supplied parameters,
@@ -203,8 +203,8 @@
203 <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term> 203 <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
204 <listitem> 204 <listitem>
205 <para> 205 <para>
206 DRIVER_HAVE_IRQ indicates whether the driver has a IRQ 206 DRIVER_HAVE_IRQ indicates whether the driver has an IRQ
207 handler, DRIVER_IRQ_SHARED indicates whether the device &amp; 207 handler. DRIVER_IRQ_SHARED indicates whether the device &amp;
208 handler support shared IRQs (note that this is required of 208 handler support shared IRQs (note that this is required of
209 PCI drivers). 209 PCI drivers).
210 </para> 210 </para>
@@ -214,8 +214,8 @@
214 <term>DRIVER_DMA_QUEUE</term> 214 <term>DRIVER_DMA_QUEUE</term>
215 <listitem> 215 <listitem>
216 <para> 216 <para>
217 If the driver queues DMA requests and completes them 217 Should be set if the driver queues DMA requests and completes them
218 asynchronously, this flag should be set. Deprecated. 218 asynchronously. Deprecated.
219 </para> 219 </para>
220 </listitem> 220 </listitem>
221 </varlistentry> 221 </varlistentry>
@@ -238,7 +238,7 @@
238 </variablelist> 238 </variablelist>
239 <para> 239 <para>
240 In this specific case, the driver requires AGP and supports 240 In this specific case, the driver requires AGP and supports
241 IRQs. DMA, as we'll see, is handled by device specific ioctls 241 IRQs. DMA, as discussed later, is handled by device-specific ioctls
242 in this case. It also supports the kernel mode setting APIs, though 242 in this case. It also supports the kernel mode setting APIs, though
243 unlike in the actual i915 driver source, this example unconditionally 243 unlike in the actual i915 driver source, this example unconditionally
244 exports KMS capability. 244 exports KMS capability.
@@ -269,36 +269,34 @@
269 initial output configuration. 269 initial output configuration.
270 </para> 270 </para>
271 <para> 271 <para>
272 Note that the tasks performed at driver load time must not 272 If compatibility is a concern (e.g. with drivers converted over
273 conflict with DRM client requirements. For instance, if user 273 to the new interfaces from the old ones), care must be taken to
274 prevent device initialization and control that is incompatible with
275 currently active userspace drivers. For instance, if user
274 level mode setting drivers are in use, it would be problematic 276 level mode setting drivers are in use, it would be problematic
275 to perform output discovery &amp; configuration at load time. 277 to perform output discovery &amp; configuration at load time.
276 Likewise, if pre-memory management aware user level drivers are 278 Likewise, if user-level drivers unaware of memory management are
277 in use, memory management and command buffer setup may need to 279 in use, memory management and command buffer setup may need to
278 be omitted. These requirements are driver specific, and care 280 be omitted. These requirements are driver-specific, and care
279 needs to be taken to keep both old and new applications and 281 needs to be taken to keep both old and new applications and
280 libraries working. The i915 driver supports the "modeset" 282 libraries working. The i915 driver supports the "modeset"
281 module parameter to control whether advanced features are 283 module parameter to control whether advanced features are
282 enabled at load time or in legacy fashion. If compatibility is 284 enabled at load time or in legacy fashion.
283 a concern (e.g. with drivers converted over to the new interfaces
284 from the old ones), care must be taken to prevent incompatible
285 device initialization and control with the currently active
286 userspace drivers.
287 </para> 285 </para>
288 286
289 <sect2> 287 <sect2>
290 <title>Driver private &amp; performance counters</title> 288 <title>Driver private &amp; performance counters</title>
291 <para> 289 <para>
292 The driver private hangs off the main drm_device structure and 290 The driver private hangs off the main drm_device structure and
293 can be used for tracking various device specific bits of 291 can be used for tracking various device-specific bits of
294 information, like register offsets, command buffer status, 292 information, like register offsets, command buffer status,
295 register state for suspend/resume, etc. At load time, a 293 register state for suspend/resume, etc. At load time, a
296 driver can simply allocate one and set drm_device.dev_priv 294 driver may simply allocate one and set drm_device.dev_priv
297 appropriately; at unload the driver can free it and set 295 appropriately; it should be freed and drm_device.dev_priv set
298 drm_device.dev_priv to NULL. 296 to NULL when the driver is unloaded.
299 </para> 297 </para>
300 <para> 298 <para>
301 The DRM supports several counters which can be used for rough 299 The DRM supports several counters which may be used for rough
302 performance characterization. Note that the DRM stat counter 300 performance characterization. Note that the DRM stat counter
303 system is not often used by applications, and supporting 301 system is not often used by applications, and supporting
304 additional counters is completely optional. 302 additional counters is completely optional.
@@ -307,15 +305,15 @@
307 These interfaces are deprecated and should not be used. If performance 305 These interfaces are deprecated and should not be used. If performance
308 monitoring is desired, the developer should investigate and 306 monitoring is desired, the developer should investigate and
309 potentially enhance the kernel perf and tracing infrastructure to export 307 potentially enhance the kernel perf and tracing infrastructure to export
310 GPU related performance information to performance monitoring 308 GPU related performance information for consumption by performance
311 tools and applications. 309 monitoring tools and applications.
312 </para> 310 </para>
313 </sect2> 311 </sect2>
314 312
315 <sect2> 313 <sect2>
316 <title>Configuring the device</title> 314 <title>Configuring the device</title>
317 <para> 315 <para>
318 Obviously, device configuration will be device specific. 316 Obviously, device configuration is device-specific.
319 However, there are several common operations: finding a 317 However, there are several common operations: finding a
320 device's PCI resources, mapping them, and potentially setting 318 device's PCI resources, mapping them, and potentially setting
321 up an IRQ handler. 319 up an IRQ handler.
@@ -323,10 +321,10 @@
323 <para> 321 <para>
324 Finding &amp; mapping resources is fairly straightforward. The 322 Finding &amp; mapping resources is fairly straightforward. The
325 DRM wrapper functions, drm_get_resource_start() and 323 DRM wrapper functions, drm_get_resource_start() and
326 drm_get_resource_len() can be used to find BARs on the given 324 drm_get_resource_len(), may be used to find BARs on the given
327 drm_device struct. Once those values have been retrieved, the 325 drm_device struct. Once those values have been retrieved, the
328 driver load function can call drm_addmap() to create a new 326 driver load function can call drm_addmap() to create a new
329 mapping for the BAR in question. Note you'll probably want a 327 mapping for the BAR in question. Note that you probably want a
330 drm_local_map_t in your driver private structure to track any 328 drm_local_map_t in your driver private structure to track any
331 mappings you create. 329 mappings you create.
332<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* --> 330<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* -->
@@ -335,20 +333,20 @@
335 <para> 333 <para>
336 if compatibility with other operating systems isn't a concern 334 if compatibility with other operating systems isn't a concern
337 (DRM drivers can run under various BSD variants and OpenSolaris), 335 (DRM drivers can run under various BSD variants and OpenSolaris),
338 native Linux calls can be used for the above, e.g. pci_resource_* 336 native Linux calls may be used for the above, e.g. pci_resource_*
339 and iomap*/iounmap. See the Linux device driver book for more 337 and iomap*/iounmap. See the Linux device driver book for more
340 info. 338 info.
341 </para> 339 </para>
342 <para> 340 <para>
343 Once you have a register map, you can use the DRM_READn() and 341 Once you have a register map, you may use the DRM_READn() and
344 DRM_WRITEn() macros to access the registers on your device, or 342 DRM_WRITEn() macros to access the registers on your device, or
345 use driver specific versions to offset into your MMIO space 343 use driver-specific versions to offset into your MMIO space
346 relative to a driver specific base pointer (see I915_READ for 344 relative to a driver-specific base pointer (see I915_READ for
347 example). 345 an example).
348 </para> 346 </para>
349 <para> 347 <para>
350 If your device supports interrupt generation, you may want to 348 If your device supports interrupt generation, you may want to
351 setup an interrupt handler at driver load time as well. This 349 set up an interrupt handler when the driver is loaded. This
352 is done using the drm_irq_install() function. If your device 350 is done using the drm_irq_install() function. If your device
353 supports vertical blank interrupts, it should call 351 supports vertical blank interrupts, it should call
354 drm_vblank_init() to initialize the core vblank handling code before 352 drm_vblank_init() to initialize the core vblank handling code before
@@ -357,7 +355,7 @@
357 </para> 355 </para>
358<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install--> 356<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
359 <para> 357 <para>
360 Once your interrupt handler is registered (it'll use your 358 Once your interrupt handler is registered (it uses your
361 drm_driver.irq_handler as the actual interrupt handling 359 drm_driver.irq_handler as the actual interrupt handling
362 function), you can safely enable interrupts on your device, 360 function), you can safely enable interrupts on your device,
363 assuming any other state your interrupt handler uses is also 361 assuming any other state your interrupt handler uses is also
@@ -371,10 +369,10 @@
371 using the pci_map_rom() call, a convenience function that 369 using the pci_map_rom() call, a convenience function that
372 takes care of mapping the actual ROM, whether it has been 370 takes care of mapping the actual ROM, whether it has been
373 shadowed into memory (typically at address 0xc0000) or exists 371 shadowed into memory (typically at address 0xc0000) or exists
374 on the PCI device in the ROM BAR. Note that once you've 372 on the PCI device in the ROM BAR. Note that after the ROM
375 mapped the ROM and extracted any necessary information, be 373 has been mapped and any necessary information has been extracted,
376 sure to unmap it; on many devices the ROM address decoder is 374 it should be unmapped; on many devices, the ROM address decoder is
377 shared with other BARs, so leaving it mapped can cause 375 shared with other BARs, so leaving it mapped could cause
378 undesired behavior like hangs or memory corruption. 376 undesired behavior like hangs or memory corruption.
379<!--!Fdrivers/pci/rom.c pci_map_rom--> 377<!--!Fdrivers/pci/rom.c pci_map_rom-->
380 </para> 378 </para>
@@ -389,9 +387,9 @@
389 should support a memory manager. 387 should support a memory manager.
390 </para> 388 </para>
391 <para> 389 <para>
392 If your driver supports memory management (it should!), you'll 390 If your driver supports memory management (it should!), you
393 need to set that up at load time as well. How you initialize 391 need to set that up at load time as well. How you initialize
394 it depends on which memory manager you're using, TTM or GEM. 392 it depends on which memory manager you're using: TTM or GEM.
395 </para> 393 </para>
396 <sect3> 394 <sect3>
397 <title>TTM initialization</title> 395 <title>TTM initialization</title>
@@ -401,7 +399,7 @@
401 and devices with dedicated video RAM (VRAM), i.e. most discrete 399 and devices with dedicated video RAM (VRAM), i.e. most discrete
402 graphics devices. If your device has dedicated RAM, supporting 400 graphics devices. If your device has dedicated RAM, supporting
403 TTM is desirable. TTM also integrates tightly with your 401 TTM is desirable. TTM also integrates tightly with your
404 driver specific buffer execution function. See the radeon 402 driver-specific buffer execution function. See the radeon
405 driver for examples. 403 driver for examples.
406 </para> 404 </para>
407 <para> 405 <para>
@@ -429,21 +427,21 @@
429 created by the memory manager at runtime. Your global TTM should 427 created by the memory manager at runtime. Your global TTM should
430 have a type of TTM_GLOBAL_TTM_MEM. The size field for the global 428 have a type of TTM_GLOBAL_TTM_MEM. The size field for the global
431 object should be sizeof(struct ttm_mem_global), and the init and 429 object should be sizeof(struct ttm_mem_global), and the init and
432 release hooks should point at your driver specific init and 430 release hooks should point at your driver-specific init and
433 release routines, which will probably eventually call 431 release routines, which probably eventually call
434 ttm_mem_global_init and ttm_mem_global_release respectively. 432 ttm_mem_global_init and ttm_mem_global_release, respectively.
435 </para> 433 </para>
436 <para> 434 <para>
437 Once your global TTM accounting structure is set up and initialized 435 Once your global TTM accounting structure is set up and initialized
438 (done by calling ttm_global_item_ref on the global object you 436 by calling ttm_global_item_ref() on it,
439 just created), you'll need to create a buffer object TTM to 437 you need to create a buffer object TTM to
440 provide a pool for buffer object allocation by clients and the 438 provide a pool for buffer object allocation by clients and the
441 kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO, 439 kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO,
442 and its size should be sizeof(struct ttm_bo_global). Again, 440 and its size should be sizeof(struct ttm_bo_global). Again,
443 driver specific init and release functions can be provided, 441 driver-specific init and release functions may be provided,
444 likely eventually calling ttm_bo_global_init and 442 likely eventually calling ttm_bo_global_init() and
445 ttm_bo_global_release, respectively. Also like the previous 443 ttm_bo_global_release(), respectively. Also, like the previous
446 object, ttm_global_item_ref is used to create an initial reference 444 object, ttm_global_item_ref() is used to create an initial reference
447 count for the TTM, which will call your initialization function. 445 count for the TTM, which will call your initialization function.
448 </para> 446 </para>
449 </sect3> 447 </sect3>
@@ -453,27 +451,26 @@
453 GEM is an alternative to TTM, designed specifically for UMA 451 GEM is an alternative to TTM, designed specifically for UMA
454 devices. It has simpler initialization and execution requirements 452 devices. It has simpler initialization and execution requirements
455 than TTM, but has no VRAM management capability. Core GEM 453 than TTM, but has no VRAM management capability. Core GEM
456 initialization is comprised of a basic drm_mm_init call to create 454 is initialized by calling drm_mm_init() to create
457 a GTT DRM MM object, which provides an address space pool for 455 a GTT DRM MM object, which provides an address space pool for
458 object allocation. In a KMS configuration, the driver will 456 object allocation. In a KMS configuration, the driver
459 need to allocate and initialize a command ring buffer following 457 needs to allocate and initialize a command ring buffer following
460 basic GEM initialization. Most UMA devices have a so-called 458 core GEM initialization. A UMA device usually has what is called a
461 "stolen" memory region, which provides space for the initial 459 "stolen" memory region, which provides space for the initial
462 framebuffer and large, contiguous memory regions required by the 460 framebuffer and large, contiguous memory regions required by the
463 device. This space is not typically managed by GEM, and must 461 device. This space is not typically managed by GEM, and it must
464 be initialized separately into its own DRM MM object. 462 be initialized separately into its own DRM MM object.
465 </para> 463 </para>
466 <para> 464 <para>
467 Initialization will be driver specific, and will depend on 465 Initialization is driver-specific. In the case of Intel
468 the architecture of the device. In the case of Intel
469 integrated graphics chips like 965GM, GEM initialization can 466 integrated graphics chips like 965GM, GEM initialization can
470 be done by calling the internal GEM init function, 467 be done by calling the internal GEM init function,
471 i915_gem_do_init(). Since the 965GM is a UMA device 468 i915_gem_do_init(). Since the 965GM is a UMA device
472 (i.e. it doesn't have dedicated VRAM), GEM will manage 469 (i.e. it doesn't have dedicated VRAM), GEM manages
473 making regular RAM available for GPU operations. Memory set 470 making regular RAM available for GPU operations. Memory set
474 aside by the BIOS (called "stolen" memory by the i915 471 aside by the BIOS (called "stolen" memory by the i915
475 driver) will be managed by the DRM memrange allocator; the 472 driver) is managed by the DRM memrange allocator; the
476 rest of the aperture will be managed by GEM. 473 rest of the aperture is managed by GEM.
477 <programlisting> 474 <programlisting>
478 /* Basic memrange allocator for stolen space (aka vram) */ 475 /* Basic memrange allocator for stolen space (aka vram) */
479 drm_memrange_init(&amp;dev_priv->vram, 0, prealloc_size); 476 drm_memrange_init(&amp;dev_priv->vram, 0, prealloc_size);
@@ -483,7 +480,7 @@
483<!--!Edrivers/char/drm/drm_memrange.c--> 480<!--!Edrivers/char/drm/drm_memrange.c-->
484 </para> 481 </para>
485 <para> 482 <para>
486 Once the memory manager has been set up, we can allocate the 483 Once the memory manager has been set up, we may allocate the
487 command buffer. In the i915 case, this is also done with a 484 command buffer. In the i915 case, this is also done with a
488 GEM function, i915_gem_init_ringbuffer(). 485 GEM function, i915_gem_init_ringbuffer().
489 </para> 486 </para>
@@ -493,16 +490,25 @@
493 <sect2> 490 <sect2>
494 <title>Output configuration</title> 491 <title>Output configuration</title>
495 <para> 492 <para>
496 The final initialization task is output configuration. This involves 493 The final initialization task is output configuration. This involves:
497 finding and initializing the CRTCs, encoders and connectors 494 <itemizedlist>
498 for your device, creating an initial configuration and 495 <listitem>
499 registering a framebuffer console driver. 496 Finding and initializing the CRTCs, encoders, and connectors
497 for the device.
498 </listitem>
499 <listitem>
500 Creating an initial configuration.
501 </listitem>
502 <listitem>
503 Registering a framebuffer console driver.
504 </listitem>
505 </itemizedlist>
500 </para> 506 </para>
501 <sect3> 507 <sect3>
502 <title>Output discovery and initialization</title> 508 <title>Output discovery and initialization</title>
503 <para> 509 <para>
504 Several core functions exist to create CRTCs, encoders and 510 Several core functions exist to create CRTCs, encoders, and
505 connectors, namely drm_crtc_init(), drm_connector_init() and 511 connectors, namely: drm_crtc_init(), drm_connector_init(), and
506 drm_encoder_init(), along with several "helper" functions to 512 drm_encoder_init(), along with several "helper" functions to
507 perform common tasks. 513 perform common tasks.
508 </para> 514 </para>
@@ -555,10 +561,10 @@ void intel_crt_init(struct drm_device *dev)
555 </programlisting> 561 </programlisting>
556 <para> 562 <para>
557 In the example above (again, taken from the i915 driver), a 563 In the example above (again, taken from the i915 driver), a
558 CRT connector and encoder combination is created. A device 564 CRT connector and encoder combination is created. A device-specific
559 specific i2c bus is also created, for fetching EDID data and 565 i2c bus is also created for fetching EDID data and
560 performing monitor detection. Once the process is complete, 566 performing monitor detection. Once the process is complete,
561 the new connector is registered with sysfs, to make its 567 the new connector is registered with sysfs to make its
562 properties available to applications. 568 properties available to applications.
563 </para> 569 </para>
564 <sect4> 570 <sect4>
@@ -567,12 +573,12 @@ void intel_crt_init(struct drm_device *dev)
567 Since many PC-class graphics devices have similar display output 573 Since many PC-class graphics devices have similar display output
568 designs, the DRM provides a set of helper functions to make 574 designs, the DRM provides a set of helper functions to make
569 output management easier. The core helper routines handle 575 output management easier. The core helper routines handle
570 encoder re-routing and disabling of unused functions following 576 encoder re-routing and the disabling of unused functions following
571 mode set. Using the helpers is optional, but recommended for 577 mode setting. Using the helpers is optional, but recommended for
572 devices with PC-style architectures (i.e. a set of display planes 578 devices with PC-style architectures (i.e. a set of display planes
573 for feeding pixels to encoders which are in turn routed to 579 for feeding pixels to encoders which are in turn routed to
574 connectors). Devices with more complex requirements needing 580 connectors). Devices with more complex requirements needing
575 finer grained management can opt to use the core callbacks 581 finer grained management may opt to use the core callbacks
576 directly. 582 directly.
577 </para> 583 </para>
578 <para> 584 <para>
@@ -580,17 +586,25 @@ void intel_crt_init(struct drm_device *dev)
580 </para> 586 </para>
581 </sect4> 587 </sect4>
582 <para> 588 <para>
583 For each encoder, CRTC and connector, several functions must 589 Each encoder object needs to provide:
584 be provided, depending on the object type. Encoder objects 590 <itemizedlist>
585 need to provide a DPMS (basically on/off) function, mode fixup 591 <listitem>
586 (for converting requested modes into native hardware timings), 592 A DPMS (basically on/off) function.
587 and prepare, set and commit functions for use by the core DRM 593 </listitem>
588 helper functions. Connector helpers need to provide mode fetch and 594 <listitem>
589 validity functions as well as an encoder matching function for 595 A mode-fixup function (for converting requested modes into
590 returning an ideal encoder for a given connector. The core 596 native hardware timings).
591 connector functions include a DPMS callback, (deprecated) 597 </listitem>
592 save/restore routines, detection, mode probing, property handling, 598 <listitem>
593 and cleanup functions. 599 Functions (prepare, set, and commit) for use by the core DRM
600 helper functions.
601 </listitem>
602 </itemizedlist>
603 Connector helpers need to provide functions (mode-fetch, validity,
604 and encoder-matching) for returning an ideal encoder for a given
605 connector. The core connector functions include a DPMS callback,
606 save/restore routines (deprecated), detection, mode probing,
607 property handling, and cleanup functions.
594 </para> 608 </para>
595<!--!Edrivers/char/drm/drm_crtc.h--> 609<!--!Edrivers/char/drm/drm_crtc.h-->
596<!--!Edrivers/char/drm/drm_crtc.c--> 610<!--!Edrivers/char/drm/drm_crtc.c-->
@@ -605,23 +619,34 @@ void intel_crt_init(struct drm_device *dev)
605 <title>VBlank event handling</title> 619 <title>VBlank event handling</title>
606 <para> 620 <para>
607 The DRM core exposes two vertical blank related ioctls: 621 The DRM core exposes two vertical blank related ioctls:
608 DRM_IOCTL_WAIT_VBLANK and DRM_IOCTL_MODESET_CTL. 622 <variablelist>
623 <varlistentry>
624 <term>DRM_IOCTL_WAIT_VBLANK</term>
625 <listitem>
626 <para>
627 This takes a struct drm_wait_vblank structure as its argument,
628 and it is used to block or request a signal when a specified
629 vblank event occurs.
630 </para>
631 </listitem>
632 </varlistentry>
633 <varlistentry>
634 <term>DRM_IOCTL_MODESET_CTL</term>
635 <listitem>
636 <para>
637 This should be called by application level drivers before and
638 after mode setting, since on many devices the vertical blank
639 counter is reset at that time. Internally, the DRM snapshots
640 the last vblank count when the ioctl is called with the
641 _DRM_PRE_MODESET command, so that the counter won't go backwards
642 (which is dealt with when _DRM_POST_MODESET is used).
643 </para>
644 </listitem>
645 </varlistentry>
646 </variablelist>
609<!--!Edrivers/char/drm/drm_irq.c--> 647<!--!Edrivers/char/drm/drm_irq.c-->
610 </para> 648 </para>
611 <para> 649 <para>
612 DRM_IOCTL_WAIT_VBLANK takes a struct drm_wait_vblank structure
613 as its argument, and is used to block or request a signal when a
614 specified vblank event occurs.
615 </para>
616 <para>
617 DRM_IOCTL_MODESET_CTL should be called by application level
618 drivers before and after mode setting, since on many devices the
619 vertical blank counter will be reset at that time. Internally,
620 the DRM snapshots the last vblank count when the ioctl is called
621 with the _DRM_PRE_MODESET command so that the counter won't go
622 backwards (which is dealt with when _DRM_POST_MODESET is used).
623 </para>
624 <para>
625 To support the functions above, the DRM core provides several 650 To support the functions above, the DRM core provides several
626 helper functions for tracking vertical blank counters, and 651 helper functions for tracking vertical blank counters, and
627 requires drivers to provide several callbacks: 652 requires drivers to provide several callbacks:
@@ -632,24 +657,24 @@ void intel_crt_init(struct drm_device *dev)
632 register. The enable and disable vblank callbacks should enable 657 register. The enable and disable vblank callbacks should enable
633 and disable vertical blank interrupts, respectively. In the 658 and disable vertical blank interrupts, respectively. In the
634 absence of DRM clients waiting on vblank events, the core DRM 659 absence of DRM clients waiting on vblank events, the core DRM
635 code will use the disable_vblank() function to disable 660 code uses the disable_vblank() function to disable
636 interrupts, which saves power. They'll be re-enabled again when 661 interrupts, which saves power. They are re-enabled again when
637 a client calls the vblank wait ioctl above. 662 a client calls the vblank wait ioctl above.
638 </para> 663 </para>
639 <para> 664 <para>
640 Devices that don't provide a count register can simply use an 665 A device that doesn't provide a count register may simply use an
641 internal atomic counter incremented on every vertical blank 666 internal atomic counter incremented on every vertical blank
642 interrupt, and can make their enable and disable vblank 667 interrupt (and then treat the enable_vblank() and disable_vblank()
643 functions into no-ops. 668 callbacks as no-ops).
644 </para> 669 </para>
645 </sect1> 670 </sect1>
646 671
647 <sect1> 672 <sect1>
648 <title>Memory management</title> 673 <title>Memory management</title>
649 <para> 674 <para>
650 The memory manager lies at the heart of many DRM operations, and 675 The memory manager lies at the heart of many DRM operations; it
651 is also required to support advanced client features like OpenGL 676 is required to support advanced client features like OpenGL
652 pbuffers. The DRM currently contains two memory managers, TTM 677 pbuffers. The DRM currently contains two memory managers: TTM
653 and GEM. 678 and GEM.
654 </para> 679 </para>
655 680
@@ -679,41 +704,46 @@ void intel_crt_init(struct drm_device *dev)
679 <para> 704 <para>
680 GEM-enabled drivers must provide gem_init_object() and 705 GEM-enabled drivers must provide gem_init_object() and
681 gem_free_object() callbacks to support the core memory 706 gem_free_object() callbacks to support the core memory
682 allocation routines. They should also provide several driver 707 allocation routines. They should also provide several driver-specific
683 specific ioctls to support command execution, pinning, buffer 708 ioctls to support command execution, pinning, buffer
684 read &amp; write, mapping, and domain ownership transfers. 709 read &amp; write, mapping, and domain ownership transfers.
685 </para> 710 </para>
686 <para> 711 <para>
687 On a fundamental level, GEM involves several operations: memory 712 On a fundamental level, GEM involves several operations:
688 allocation and freeing, command execution, and aperture management 713 <itemizedlist>
689 at command execution time. Buffer object allocation is relatively 714 <listitem>Memory allocation and freeing</listitem>
715 <listitem>Command execution</listitem>
716 <listitem>Aperture management at command execution time</listitem>
717 </itemizedlist>
718 Buffer object allocation is relatively
690 straightforward and largely provided by Linux's shmem layer, which 719 straightforward and largely provided by Linux's shmem layer, which
691 provides memory to back each object. When mapped into the GTT 720 provides memory to back each object. When mapped into the GTT
692 or used in a command buffer, the backing pages for an object are 721 or used in a command buffer, the backing pages for an object are
693 flushed to memory and marked write combined so as to be coherent 722 flushed to memory and marked write combined so as to be coherent
694 with the GPU. Likewise, when the GPU finishes rendering to an object, 723 with the GPU. Likewise, if the CPU accesses an object after the GPU
695 if the CPU accesses it, it must be made coherent with the CPU's view 724 has finished rendering to the object, then the object must be made
725 coherent with the CPU's view
696 of memory, usually involving GPU cache flushing of various kinds. 726 of memory, usually involving GPU cache flushing of various kinds.
697 This core CPU&lt;-&gt;GPU coherency management is provided by the GEM 727 This core CPU&lt;-&gt;GPU coherency management is provided by a
698 set domain function, which evaluates an object's current domain and 728 device-specific ioctl, which evaluates an object's current domain and
699 performs any necessary flushing or synchronization to put the object 729 performs any necessary flushing or synchronization to put the object
700 into the desired coherency domain (note that the object may be busy, 730 into the desired coherency domain (note that the object may be busy,
701 i.e. an active render target; in that case the set domain function 731 i.e. an active render target; in that case, setting the domain
702 will block the client and wait for rendering to complete before 732 blocks the client and waits for rendering to complete before
703 performing any necessary flushing operations). 733 performing any necessary flushing operations).
704 </para> 734 </para>
705 <para> 735 <para>
706 Perhaps the most important GEM function is providing a command 736 Perhaps the most important GEM function is providing a command
707 execution interface to clients. Client programs construct command 737 execution interface to clients. Client programs construct command
708 buffers containing references to previously allocated memory objects 738 buffers containing references to previously allocated memory objects,
709 and submit them to GEM. At that point, GEM will take care to bind 739 and then submit them to GEM. At that point, GEM takes care to bind
710 all the objects into the GTT, execute the buffer, and provide 740 all the objects into the GTT, execute the buffer, and provide
711 necessary synchronization between clients accessing the same buffers. 741 necessary synchronization between clients accessing the same buffers.
712 This often involves evicting some objects from the GTT and re-binding 742 This often involves evicting some objects from the GTT and re-binding
713 others (a fairly expensive operation), and providing relocation 743 others (a fairly expensive operation), and providing relocation
714 support which hides fixed GTT offsets from clients. Clients must 744 support which hides fixed GTT offsets from clients. Clients must
715 take care not to submit command buffers that reference more objects 745 take care not to submit command buffers that reference more objects
716 than can fit in the GTT or GEM will reject them and no rendering 746 than can fit in the GTT; otherwise, GEM will reject them and no rendering
717 will occur. Similarly, if several objects in the buffer require 747 will occur. Similarly, if several objects in the buffer require
718 fence registers to be allocated for correct rendering (e.g. 2D blits 748 fence registers to be allocated for correct rendering (e.g. 2D blits
719 on pre-965 chips), care must be taken not to require more fence 749 on pre-965 chips), care must be taken not to require more fence
@@ -729,7 +759,7 @@ void intel_crt_init(struct drm_device *dev)
729 <title>Output management</title> 759 <title>Output management</title>
730 <para> 760 <para>
731 At the core of the DRM output management code is a set of 761 At the core of the DRM output management code is a set of
732 structures representing CRTCs, encoders and connectors. 762 structures representing CRTCs, encoders, and connectors.
733 </para> 763 </para>
734 <para> 764 <para>
735 A CRTC is an abstraction representing a part of the chip that 765 A CRTC is an abstraction representing a part of the chip that
@@ -765,21 +795,19 @@ void intel_crt_init(struct drm_device *dev)
765 <sect1> 795 <sect1>
766 <title>Framebuffer management</title> 796 <title>Framebuffer management</title>
767 <para> 797 <para>
768 In order to set a mode on a given CRTC, encoder and connector 798 Clients need to provide a framebuffer object which provides a source
769 configuration, clients need to provide a framebuffer object which 799 of pixels for a CRTC to deliver to the encoder(s) and ultimately the
770 will provide a source of pixels for the CRTC to deliver to the encoder(s) 800 connector(s). A framebuffer is fundamentally a driver-specific memory
771 and ultimately the connector(s) in the configuration. A framebuffer 801 object, made into an opaque handle by the DRM's addfb() function.
772 is fundamentally a driver specific memory object, made into an opaque 802 Once a framebuffer has been created this way, it may be passed to the
773 handle by the DRM addfb function. Once an fb has been created this 803 KMS mode setting routines for use in a completed configuration.
774 way it can be passed to the KMS mode setting routines for use in
775 a configuration.
776 </para> 804 </para>
777 </sect1> 805 </sect1>
778 806
779 <sect1> 807 <sect1>
780 <title>Command submission &amp; fencing</title> 808 <title>Command submission &amp; fencing</title>
781 <para> 809 <para>
782 This should cover a few device specific command submission 810 This should cover a few device-specific command submission
783 implementations. 811 implementations.
784 </para> 812 </para>
785 </sect1> 813 </sect1>
@@ -789,7 +817,7 @@ void intel_crt_init(struct drm_device *dev)
789 <para> 817 <para>
790 The DRM core provides some suspend/resume code, but drivers 818 The DRM core provides some suspend/resume code, but drivers
791 wanting full suspend/resume support should provide save() and 819 wanting full suspend/resume support should provide save() and
792 restore() functions. These will be called at suspend, 820 restore() functions. These are called at suspend,
793 hibernate, or resume time, and should perform any state save or 821 hibernate, or resume time, and should perform any state save or
794 restore required by your device across suspend or hibernate 822 restore required by your device across suspend or hibernate
795 states. 823 states.
@@ -812,8 +840,8 @@ void intel_crt_init(struct drm_device *dev)
812 <para> 840 <para>
813 The DRM core exports several interfaces to applications, 841 The DRM core exports several interfaces to applications,
814 generally intended to be used through corresponding libdrm 842 generally intended to be used through corresponding libdrm
815 wrapper functions. In addition, drivers export device specific 843 wrapper functions. In addition, drivers export device-specific
816 interfaces for use by userspace drivers &amp; device aware 844 interfaces for use by userspace drivers &amp; device-aware
817 applications through ioctls and sysfs files. 845 applications through ioctls and sysfs files.
818 </para> 846 </para>
819 <para> 847 <para>
@@ -822,8 +850,8 @@ void intel_crt_init(struct drm_device *dev)
822 management, memory management, and output management. 850 management, memory management, and output management.
823 </para> 851 </para>
824 <para> 852 <para>
825 Cover generic ioctls and sysfs layout here. Only need high 853 Cover generic ioctls and sysfs layout here. We only need high-level
826 level info, since man pages will cover the rest. 854 info, since man pages should cover the rest.
827 </para> 855 </para>
828 </chapter> 856 </chapter>
829 857
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 17910e2052ad..0c674be0d3c6 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -572,7 +572,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
572 </para> 572 </para>
573 <para> 573 <para>
574 The simplest way to activate the FLASH based bad block table support 574 The simplest way to activate the FLASH based bad block table support
575 is to set the option NAND_USE_FLASH_BBT in the option field of 575 is to set the option NAND_BBT_USE_FLASH in the bbt_option field of
576 the nand chip structure before calling nand_scan(). For AG-AND 576 the nand chip structure before calling nand_scan(). For AG-AND
577 chips is this done by default. 577 chips is this done by default.
578 This activates the default FLASH based bad block table functionality 578 This activates the default FLASH based bad block table functionality
@@ -773,20 +773,6 @@ struct nand_oobinfo {
773 done according to the default builtin scheme. 773 done according to the default builtin scheme.
774 </para> 774 </para>
775 </sect2> 775 </sect2>
776 <sect2 id="User_space_placement_selection">
777 <title>User space placement selection</title>
778 <para>
779 All non ecc functions like mtd->read and mtd->write use an internal
780 structure, which can be set by an ioctl. This structure is preset
781 to the autoplacement default.
782 <programlisting>
783 ioctl (fd, MEMSETOOBSEL, oobsel);
784 </programlisting>
785 oobsel is a pointer to a user supplied structure of type
786 nand_oobconfig. The contents of this structure must match the
787 criteria of the filesystem, which will be used. See an example in utils/nandwrite.c.
788 </para>
789 </sect2>
790 </sect1> 776 </sect1>
791 <sect1 id="Spare_area_autoplacement_default"> 777 <sect1 id="Spare_area_autoplacement_default">
792 <title>Spare area autoplacement default schemes</title> 778 <title>Spare area autoplacement default schemes</title>
@@ -1158,9 +1144,6 @@ in this page</entry>
1158 These constants are defined in nand.h. They are ored together to describe 1144 These constants are defined in nand.h. They are ored together to describe
1159 the functionality. 1145 the functionality.
1160 <programlisting> 1146 <programlisting>
1161/* Use a flash based bad block table. This option is parsed by the
1162 * default bad block table function (nand_default_bbt). */
1163#define NAND_USE_FLASH_BBT 0x00010000
1164/* The hw ecc generator provides a syndrome instead a ecc value on read 1147/* The hw ecc generator provides a syndrome instead a ecc value on read
1165 * This can only work if we have the ecc bytes directly behind the 1148 * This can only work if we have the ecc bytes directly behind the
1166 * data bytes. Applies for DOC and AG-AND Renesas HW Reed Solomon generators */ 1149 * data bytes. Applies for DOC and AG-AND Renesas HW Reed Solomon generators */
diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroups/freezer-subsystem.txt
index c21d77742a07..7e62de1e59ff 100644
--- a/Documentation/cgroups/freezer-subsystem.txt
+++ b/Documentation/cgroups/freezer-subsystem.txt
@@ -33,9 +33,9 @@ demonstrate this problem using nested bash shells:
33 33
34 From a second, unrelated bash shell: 34 From a second, unrelated bash shell:
35 $ kill -SIGSTOP 16690 35 $ kill -SIGSTOP 16690
36 $ kill -SIGCONT 16990 36 $ kill -SIGCONT 16690
37 37
38 <at this point 16990 exits and causes 16644 to exit too> 38 <at this point 16690 exits and causes 16644 to exit too>
39 39
40This happens because bash can observe both signals and choose how it 40This happens because bash can observe both signals and choose how it
41responds to them. 41responds to them.
diff --git a/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt b/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt
new file mode 100644
index 000000000000..ef66ddd01da0
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt
@@ -0,0 +1,14 @@
1* Atmel Data Flash
2
3Required properties:
4- compatible : "atmel,<model>", "atmel,<series>", "atmel,dataflash".
5
6Example:
7
8flash@1 {
9 #address-cells = <1>;
10 #size-cells = <1>;
11 compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash";
12 spi-max-frequency = <25000000>;
13 reg = <1>;
14};
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 4f3443230d89..edad99abec21 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -349,6 +349,7 @@ STAC92HD83*
349 ref Reference board 349 ref Reference board
350 mic-ref Reference board with power management for ports 350 mic-ref Reference board with power management for ports
351 dell-s14 Dell laptop 351 dell-s14 Dell laptop
352 dell-vostro-3500 Dell Vostro 3500 laptop
352 hp HP laptops with (inverted) mute-LED 353 hp HP laptops with (inverted) mute-LED
353 hp-dv7-4000 HP dv-7 4000 354 hp-dv7-4000 HP dv-7 4000
354 auto BIOS setup (default) 355 auto BIOS setup (default)
diff --git a/Kbuild b/Kbuild
index 4caab4f6cba7..b8b708ad6dc3 100644
--- a/Kbuild
+++ b/Kbuild
@@ -92,7 +92,7 @@ always += missing-syscalls
92targets += missing-syscalls 92targets += missing-syscalls
93 93
94quiet_cmd_syscalls = CALL $< 94quiet_cmd_syscalls = CALL $<
95 cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) 95 cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags)
96 96
97missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE 97missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE
98 $(call cmd,syscalls) 98 $(call cmd,syscalls)
diff --git a/MAINTAINERS b/MAINTAINERS
index 6388a96dc1c4..071a99674347 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1032,6 +1032,7 @@ F: arch/arm/include/asm/hardware/ioc.h
1032F: arch/arm/include/asm/hardware/iomd.h 1032F: arch/arm/include/asm/hardware/iomd.h
1033F: arch/arm/include/asm/hardware/memc.h 1033F: arch/arm/include/asm/hardware/memc.h
1034F: arch/arm/mach-rpc/ 1034F: arch/arm/mach-rpc/
1035F: drivers/net/ethernet/8390/etherh.c
1035F: drivers/net/ethernet/i825xx/ether1* 1036F: drivers/net/ethernet/i825xx/ether1*
1036F: drivers/net/ethernet/seeq/ether3* 1037F: drivers/net/ethernet/seeq/ether3*
1037F: drivers/scsi/arm/ 1038F: drivers/scsi/arm/
@@ -1105,6 +1106,7 @@ F: drivers/media/video/s5p-fimc/
1105ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT 1106ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
1106M: Kyungmin Park <kyungmin.park@samsung.com> 1107M: Kyungmin Park <kyungmin.park@samsung.com>
1107M: Kamil Debski <k.debski@samsung.com> 1108M: Kamil Debski <k.debski@samsung.com>
1109M: Jeongtae Park <jtp.park@samsung.com>
1108L: linux-arm-kernel@lists.infradead.org 1110L: linux-arm-kernel@lists.infradead.org
1109L: linux-media@vger.kernel.org 1111L: linux-media@vger.kernel.org
1110S: Maintained 1112S: Maintained
@@ -2341,6 +2343,13 @@ S: Supported
2341F: drivers/gpu/drm/i915 2343F: drivers/gpu/drm/i915
2342F: include/drm/i915* 2344F: include/drm/i915*
2343 2345
2346DRM DRIVERS FOR EXYNOS
2347M: Inki Dae <inki.dae@samsung.com>
2348L: dri-devel@lists.freedesktop.org
2349S: Supported
2350F: drivers/gpu/drm/exynos
2351F: include/drm/exynos*
2352
2344DSCC4 DRIVER 2353DSCC4 DRIVER
2345M: Francois Romieu <romieu@fr.zoreil.com> 2354M: Francois Romieu <romieu@fr.zoreil.com>
2346L: netdev@vger.kernel.org 2355L: netdev@vger.kernel.org
@@ -4672,7 +4681,7 @@ L: linux-omap@vger.kernel.org
4672W: http://www.muru.com/linux/omap/ 4681W: http://www.muru.com/linux/omap/
4673W: http://linux.omap.com/ 4682W: http://linux.omap.com/
4674Q: http://patchwork.kernel.org/project/linux-omap/list/ 4683Q: http://patchwork.kernel.org/project/linux-omap/list/
4675T: git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap-2.6.git 4684T: git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git
4676S: Maintained 4685S: Maintained
4677F: arch/arm/*omap*/ 4686F: arch/arm/*omap*/
4678 4687
@@ -5470,7 +5479,7 @@ S: Maintained
5470F: drivers/net/ethernet/rdc/r6040.c 5479F: drivers/net/ethernet/rdc/r6040.c
5471 5480
5472RDS - RELIABLE DATAGRAM SOCKETS 5481RDS - RELIABLE DATAGRAM SOCKETS
5473M: Andy Grover <andy.grover@oracle.com> 5482M: Venkat Venkatsubra <venkat.x.venkatsubra@oracle.com>
5474L: rds-devel@oss.oracle.com (moderated for non-subscribers) 5483L: rds-devel@oss.oracle.com (moderated for non-subscribers)
5475S: Supported 5484S: Supported
5476F: net/rds/ 5485F: net/rds/
@@ -6121,7 +6130,7 @@ F: sound/
6121SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) 6130SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
6122M: Liam Girdwood <lrg@ti.com> 6131M: Liam Girdwood <lrg@ti.com>
6123M: Mark Brown <broonie@opensource.wolfsonmicro.com> 6132M: Mark Brown <broonie@opensource.wolfsonmicro.com>
6124T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound-2.6.git 6133T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
6125L: alsa-devel@alsa-project.org (moderated for non-subscribers) 6134L: alsa-devel@alsa-project.org (moderated for non-subscribers)
6126W: http://alsa-project.org/main/index.php/ASoC 6135W: http://alsa-project.org/main/index.php/ASoC
6127S: Supported 6136S: Supported
diff --git a/Makefile b/Makefile
index ed25c5b35470..dab8610c4d6f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 1 2PATCHLEVEL = 2
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = 4EXTRAVERSION = -rc2
5NAME = "Divemaster Edition" 5NAME = Saber-toothed Squirrel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
diff --git a/arch/arm/boot/dts/tegra-ventana.dts b/arch/arm/boot/dts/tegra-ventana.dts
index 9b29a623aaf1..3f9abd6b6964 100644
--- a/arch/arm/boot/dts/tegra-ventana.dts
+++ b/arch/arm/boot/dts/tegra-ventana.dts
@@ -22,11 +22,10 @@
22 sdhci@c8000400 { 22 sdhci@c8000400 {
23 cd-gpios = <&gpio 69 0>; /* gpio PI5 */ 23 cd-gpios = <&gpio 69 0>; /* gpio PI5 */
24 wp-gpios = <&gpio 57 0>; /* gpio PH1 */ 24 wp-gpios = <&gpio 57 0>; /* gpio PH1 */
25 power-gpios = <&gpio 155 0>; /* gpio PT3 */ 25 power-gpios = <&gpio 70 0>; /* gpio PI6 */
26 }; 26 };
27 27
28 sdhci@c8000600 { 28 sdhci@c8000600 {
29 power-gpios = <&gpio 70 0>; /* gpio PI6 */
30 support-8bit; 29 support-8bit;
31 }; 30 };
32}; 31};
diff --git a/arch/arm/mach-at91/at91cap9.c b/arch/arm/mach-at91/at91cap9.c
index ecdd54dd68c6..17632b82dd76 100644
--- a/arch/arm/mach-at91/at91cap9.c
+++ b/arch/arm/mach-at91/at91cap9.c
@@ -137,7 +137,7 @@ static struct clk pwm_clk = {
137 .type = CLK_TYPE_PERIPHERAL, 137 .type = CLK_TYPE_PERIPHERAL,
138}; 138};
139static struct clk macb_clk = { 139static struct clk macb_clk = {
140 .name = "macb_clk", 140 .name = "pclk",
141 .pmc_mask = 1 << AT91CAP9_ID_EMAC, 141 .pmc_mask = 1 << AT91CAP9_ID_EMAC,
142 .type = CLK_TYPE_PERIPHERAL, 142 .type = CLK_TYPE_PERIPHERAL,
143}; 143};
@@ -210,6 +210,8 @@ static struct clk *periph_clocks[] __initdata = {
210}; 210};
211 211
212static struct clk_lookup periph_clocks_lookups[] = { 212static struct clk_lookup periph_clocks_lookups[] = {
213 /* One additional fake clock for macb_hclk */
214 CLKDEV_CON_ID("hclk", &macb_clk),
213 CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk), 215 CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk),
214 CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk), 216 CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk),
215 CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk), 217 CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c
index a4401d6b5b07..695aecab0a67 100644
--- a/arch/arm/mach-at91/at91cap9_devices.c
+++ b/arch/arm/mach-at91/at91cap9_devices.c
@@ -98,7 +98,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
98 * USB HS Device (Gadget) 98 * USB HS Device (Gadget)
99 * -------------------------------------------------------------------- */ 99 * -------------------------------------------------------------------- */
100 100
101#if defined(CONFIG_USB_GADGET_ATMEL_USBA) || defined(CONFIG_USB_GADGET_ATMEL_USBA_MODULE) 101#if defined(CONFIG_USB_ATMEL_USBA) || defined(CONFIG_USB_ATMEL_USBA_MODULE)
102 102
103static struct resource usba_udc_resources[] = { 103static struct resource usba_udc_resources[] = {
104 [0] = { 104 [0] = {
@@ -200,7 +200,7 @@ void __init at91_add_device_usba(struct usba_platform_data *data) {}
200 200
201#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE) 201#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
202static u64 eth_dmamask = DMA_BIT_MASK(32); 202static u64 eth_dmamask = DMA_BIT_MASK(32);
203static struct at91_eth_data eth_data; 203static struct macb_platform_data eth_data;
204 204
205static struct resource eth_resources[] = { 205static struct resource eth_resources[] = {
206 [0] = { 206 [0] = {
@@ -227,7 +227,7 @@ static struct platform_device at91cap9_eth_device = {
227 .num_resources = ARRAY_SIZE(eth_resources), 227 .num_resources = ARRAY_SIZE(eth_resources),
228}; 228};
229 229
230void __init at91_add_device_eth(struct at91_eth_data *data) 230void __init at91_add_device_eth(struct macb_platform_data *data)
231{ 231{
232 if (!data) 232 if (!data)
233 return; 233 return;
@@ -264,7 +264,7 @@ void __init at91_add_device_eth(struct at91_eth_data *data)
264 platform_device_register(&at91cap9_eth_device); 264 platform_device_register(&at91cap9_eth_device);
265} 265}
266#else 266#else
267void __init at91_add_device_eth(struct at91_eth_data *data) {} 267void __init at91_add_device_eth(struct macb_platform_data *data) {}
268#endif 268#endif
269 269
270 270
@@ -1021,8 +1021,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
1021#if defined(CONFIG_SERIAL_ATMEL) 1021#if defined(CONFIG_SERIAL_ATMEL)
1022static struct resource dbgu_resources[] = { 1022static struct resource dbgu_resources[] = {
1023 [0] = { 1023 [0] = {
1024 .start = AT91_VA_BASE_SYS + AT91_DBGU, 1024 .start = AT91_BASE_SYS + AT91_DBGU,
1025 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, 1025 .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
1026 .flags = IORESOURCE_MEM, 1026 .flags = IORESOURCE_MEM,
1027 }, 1027 },
1028 [1] = { 1028 [1] = {
@@ -1035,7 +1035,6 @@ static struct resource dbgu_resources[] = {
1035static struct atmel_uart_data dbgu_data = { 1035static struct atmel_uart_data dbgu_data = {
1036 .use_dma_tx = 0, 1036 .use_dma_tx = 0,
1037 .use_dma_rx = 0, /* DBGU not capable of receive DMA */ 1037 .use_dma_rx = 0, /* DBGU not capable of receive DMA */
1038 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
1039}; 1038};
1040 1039
1041static u64 dbgu_dmamask = DMA_BIT_MASK(32); 1040static u64 dbgu_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 01d8bbd1468b..5610f14e342e 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -135,7 +135,7 @@ void __init at91_add_device_udc(struct at91_udc_data *data) {}
135 135
136#if defined(CONFIG_ARM_AT91_ETHER) || defined(CONFIG_ARM_AT91_ETHER_MODULE) 136#if defined(CONFIG_ARM_AT91_ETHER) || defined(CONFIG_ARM_AT91_ETHER_MODULE)
137static u64 eth_dmamask = DMA_BIT_MASK(32); 137static u64 eth_dmamask = DMA_BIT_MASK(32);
138static struct at91_eth_data eth_data; 138static struct macb_platform_data eth_data;
139 139
140static struct resource eth_resources[] = { 140static struct resource eth_resources[] = {
141 [0] = { 141 [0] = {
@@ -162,7 +162,7 @@ static struct platform_device at91rm9200_eth_device = {
162 .num_resources = ARRAY_SIZE(eth_resources), 162 .num_resources = ARRAY_SIZE(eth_resources),
163}; 163};
164 164
165void __init at91_add_device_eth(struct at91_eth_data *data) 165void __init at91_add_device_eth(struct macb_platform_data *data)
166{ 166{
167 if (!data) 167 if (!data)
168 return; 168 return;
@@ -199,7 +199,7 @@ void __init at91_add_device_eth(struct at91_eth_data *data)
199 platform_device_register(&at91rm9200_eth_device); 199 platform_device_register(&at91rm9200_eth_device);
200} 200}
201#else 201#else
202void __init at91_add_device_eth(struct at91_eth_data *data) {} 202void __init at91_add_device_eth(struct macb_platform_data *data) {}
203#endif 203#endif
204 204
205 205
@@ -877,8 +877,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
877#if defined(CONFIG_SERIAL_ATMEL) 877#if defined(CONFIG_SERIAL_ATMEL)
878static struct resource dbgu_resources[] = { 878static struct resource dbgu_resources[] = {
879 [0] = { 879 [0] = {
880 .start = AT91_VA_BASE_SYS + AT91_DBGU, 880 .start = AT91_BASE_SYS + AT91_DBGU,
881 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, 881 .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
882 .flags = IORESOURCE_MEM, 882 .flags = IORESOURCE_MEM,
883 }, 883 },
884 [1] = { 884 [1] = {
@@ -891,7 +891,6 @@ static struct resource dbgu_resources[] = {
891static struct atmel_uart_data dbgu_data = { 891static struct atmel_uart_data dbgu_data = {
892 .use_dma_tx = 0, 892 .use_dma_tx = 0,
893 .use_dma_rx = 0, /* DBGU not capable of receive DMA */ 893 .use_dma_rx = 0, /* DBGU not capable of receive DMA */
894 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
895}; 894};
896 895
897static u64 dbgu_dmamask = DMA_BIT_MASK(32); 896static u64 dbgu_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index b84a9f642f59..249ed1f5912d 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -120,7 +120,7 @@ static struct clk ohci_clk = {
120 .type = CLK_TYPE_PERIPHERAL, 120 .type = CLK_TYPE_PERIPHERAL,
121}; 121};
122static struct clk macb_clk = { 122static struct clk macb_clk = {
123 .name = "macb_clk", 123 .name = "pclk",
124 .pmc_mask = 1 << AT91SAM9260_ID_EMAC, 124 .pmc_mask = 1 << AT91SAM9260_ID_EMAC,
125 .type = CLK_TYPE_PERIPHERAL, 125 .type = CLK_TYPE_PERIPHERAL,
126}; 126};
@@ -190,6 +190,8 @@ static struct clk *periph_clocks[] __initdata = {
190}; 190};
191 191
192static struct clk_lookup periph_clocks_lookups[] = { 192static struct clk_lookup periph_clocks_lookups[] = {
193 /* One additional fake clock for macb_hclk */
194 CLKDEV_CON_ID("hclk", &macb_clk),
193 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk), 195 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
194 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk), 196 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
195 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk), 197 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 24b6f8c0440d..ff75f7d4091b 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -136,7 +136,7 @@ void __init at91_add_device_udc(struct at91_udc_data *data) {}
136 136
137#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE) 137#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
138static u64 eth_dmamask = DMA_BIT_MASK(32); 138static u64 eth_dmamask = DMA_BIT_MASK(32);
139static struct at91_eth_data eth_data; 139static struct macb_platform_data eth_data;
140 140
141static struct resource eth_resources[] = { 141static struct resource eth_resources[] = {
142 [0] = { 142 [0] = {
@@ -163,7 +163,7 @@ static struct platform_device at91sam9260_eth_device = {
163 .num_resources = ARRAY_SIZE(eth_resources), 163 .num_resources = ARRAY_SIZE(eth_resources),
164}; 164};
165 165
166void __init at91_add_device_eth(struct at91_eth_data *data) 166void __init at91_add_device_eth(struct macb_platform_data *data)
167{ 167{
168 if (!data) 168 if (!data)
169 return; 169 return;
@@ -200,7 +200,7 @@ void __init at91_add_device_eth(struct at91_eth_data *data)
200 platform_device_register(&at91sam9260_eth_device); 200 platform_device_register(&at91sam9260_eth_device);
201} 201}
202#else 202#else
203void __init at91_add_device_eth(struct at91_eth_data *data) {} 203void __init at91_add_device_eth(struct macb_platform_data *data) {}
204#endif 204#endif
205 205
206 206
@@ -837,8 +837,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
837#if defined(CONFIG_SERIAL_ATMEL) 837#if defined(CONFIG_SERIAL_ATMEL)
838static struct resource dbgu_resources[] = { 838static struct resource dbgu_resources[] = {
839 [0] = { 839 [0] = {
840 .start = AT91_VA_BASE_SYS + AT91_DBGU, 840 .start = AT91_BASE_SYS + AT91_DBGU,
841 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, 841 .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
842 .flags = IORESOURCE_MEM, 842 .flags = IORESOURCE_MEM,
843 }, 843 },
844 [1] = { 844 [1] = {
@@ -851,7 +851,6 @@ static struct resource dbgu_resources[] = {
851static struct atmel_uart_data dbgu_data = { 851static struct atmel_uart_data dbgu_data = {
852 .use_dma_tx = 0, 852 .use_dma_tx = 0,
853 .use_dma_rx = 0, /* DBGU not capable of receive DMA */ 853 .use_dma_rx = 0, /* DBGU not capable of receive DMA */
854 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
855}; 854};
856 855
857static u64 dbgu_dmamask = DMA_BIT_MASK(32); 856static u64 dbgu_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 3b70b3897d95..ae78f4d03b73 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -816,8 +816,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
816#if defined(CONFIG_SERIAL_ATMEL) 816#if defined(CONFIG_SERIAL_ATMEL)
817static struct resource dbgu_resources[] = { 817static struct resource dbgu_resources[] = {
818 [0] = { 818 [0] = {
819 .start = AT91_VA_BASE_SYS + AT91_DBGU, 819 .start = AT91_BASE_SYS + AT91_DBGU,
820 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, 820 .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
821 .flags = IORESOURCE_MEM, 821 .flags = IORESOURCE_MEM,
822 }, 822 },
823 [1] = { 823 [1] = {
@@ -830,7 +830,6 @@ static struct resource dbgu_resources[] = {
830static struct atmel_uart_data dbgu_data = { 830static struct atmel_uart_data dbgu_data = {
831 .use_dma_tx = 0, 831 .use_dma_tx = 0,
832 .use_dma_rx = 0, /* DBGU not capable of receive DMA */ 832 .use_dma_rx = 0, /* DBGU not capable of receive DMA */
833 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
834}; 833};
835 834
836static u64 dbgu_dmamask = DMA_BIT_MASK(32); 835static u64 dbgu_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index f83fbb0ee0c5..182d112dc59d 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -118,7 +118,7 @@ static struct clk pwm_clk = {
118 .type = CLK_TYPE_PERIPHERAL, 118 .type = CLK_TYPE_PERIPHERAL,
119}; 119};
120static struct clk macb_clk = { 120static struct clk macb_clk = {
121 .name = "macb_clk", 121 .name = "pclk",
122 .pmc_mask = 1 << AT91SAM9263_ID_EMAC, 122 .pmc_mask = 1 << AT91SAM9263_ID_EMAC,
123 .type = CLK_TYPE_PERIPHERAL, 123 .type = CLK_TYPE_PERIPHERAL,
124}; 124};
@@ -182,6 +182,8 @@ static struct clk *periph_clocks[] __initdata = {
182}; 182};
183 183
184static struct clk_lookup periph_clocks_lookups[] = { 184static struct clk_lookup periph_clocks_lookups[] = {
185 /* One additional fake clock for macb_hclk */
186 CLKDEV_CON_ID("hclk", &macb_clk),
185 CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk), 187 CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
186 CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk), 188 CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
187 CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk), 189 CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index 3faa1fde9ad9..68562ce2af94 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -144,7 +144,7 @@ void __init at91_add_device_udc(struct at91_udc_data *data) {}
144 144
145#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE) 145#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
146static u64 eth_dmamask = DMA_BIT_MASK(32); 146static u64 eth_dmamask = DMA_BIT_MASK(32);
147static struct at91_eth_data eth_data; 147static struct macb_platform_data eth_data;
148 148
149static struct resource eth_resources[] = { 149static struct resource eth_resources[] = {
150 [0] = { 150 [0] = {
@@ -171,7 +171,7 @@ static struct platform_device at91sam9263_eth_device = {
171 .num_resources = ARRAY_SIZE(eth_resources), 171 .num_resources = ARRAY_SIZE(eth_resources),
172}; 172};
173 173
174void __init at91_add_device_eth(struct at91_eth_data *data) 174void __init at91_add_device_eth(struct macb_platform_data *data)
175{ 175{
176 if (!data) 176 if (!data)
177 return; 177 return;
@@ -208,7 +208,7 @@ void __init at91_add_device_eth(struct at91_eth_data *data)
208 platform_device_register(&at91sam9263_eth_device); 208 platform_device_register(&at91sam9263_eth_device);
209} 209}
210#else 210#else
211void __init at91_add_device_eth(struct at91_eth_data *data) {} 211void __init at91_add_device_eth(struct macb_platform_data *data) {}
212#endif 212#endif
213 213
214 214
@@ -1196,8 +1196,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
1196 1196
1197static struct resource dbgu_resources[] = { 1197static struct resource dbgu_resources[] = {
1198 [0] = { 1198 [0] = {
1199 .start = AT91_VA_BASE_SYS + AT91_DBGU, 1199 .start = AT91_BASE_SYS + AT91_DBGU,
1200 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, 1200 .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
1201 .flags = IORESOURCE_MEM, 1201 .flags = IORESOURCE_MEM,
1202 }, 1202 },
1203 [1] = { 1203 [1] = {
@@ -1210,7 +1210,6 @@ static struct resource dbgu_resources[] = {
1210static struct atmel_uart_data dbgu_data = { 1210static struct atmel_uart_data dbgu_data = {
1211 .use_dma_tx = 0, 1211 .use_dma_tx = 0,
1212 .use_dma_rx = 0, /* DBGU not capable of receive DMA */ 1212 .use_dma_rx = 0, /* DBGU not capable of receive DMA */
1213 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
1214}; 1213};
1215 1214
1216static u64 dbgu_dmamask = DMA_BIT_MASK(32); 1215static u64 dbgu_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 318b0407ea04..5a0e522ffa94 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -150,7 +150,7 @@ static struct clk ac97_clk = {
150 .type = CLK_TYPE_PERIPHERAL, 150 .type = CLK_TYPE_PERIPHERAL,
151}; 151};
152static struct clk macb_clk = { 152static struct clk macb_clk = {
153 .name = "macb_clk", 153 .name = "pclk",
154 .pmc_mask = 1 << AT91SAM9G45_ID_EMAC, 154 .pmc_mask = 1 << AT91SAM9G45_ID_EMAC,
155 .type = CLK_TYPE_PERIPHERAL, 155 .type = CLK_TYPE_PERIPHERAL,
156}; 156};
@@ -209,6 +209,8 @@ static struct clk *periph_clocks[] __initdata = {
209}; 209};
210 210
211static struct clk_lookup periph_clocks_lookups[] = { 211static struct clk_lookup periph_clocks_lookups[] = {
212 /* One additional fake clock for macb_hclk */
213 CLKDEV_CON_ID("hclk", &macb_clk),
212 /* One additional fake clock for ohci */ 214 /* One additional fake clock for ohci */
213 CLKDEV_CON_ID("ohci_clk", &uhphs_clk), 215 CLKDEV_CON_ID("ohci_clk", &uhphs_clk),
214 CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci", &uhphs_clk), 216 CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci", &uhphs_clk),
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 000b5e1da965..e2cb835c4d7c 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -197,7 +197,7 @@ void __init at91_add_device_usbh_ehci(struct at91_usbh_data *data) {}
197 * USB HS Device (Gadget) 197 * USB HS Device (Gadget)
198 * -------------------------------------------------------------------- */ 198 * -------------------------------------------------------------------- */
199 199
200#if defined(CONFIG_USB_GADGET_ATMEL_USBA) || defined(CONFIG_USB_GADGET_ATMEL_USBA_MODULE) 200#if defined(CONFIG_USB_ATMEL_USBA) || defined(CONFIG_USB_ATMEL_USBA_MODULE)
201static struct resource usba_udc_resources[] = { 201static struct resource usba_udc_resources[] = {
202 [0] = { 202 [0] = {
203 .start = AT91SAM9G45_UDPHS_FIFO, 203 .start = AT91SAM9G45_UDPHS_FIFO,
@@ -284,7 +284,7 @@ void __init at91_add_device_usba(struct usba_platform_data *data) {}
284 284
285#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE) 285#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
286static u64 eth_dmamask = DMA_BIT_MASK(32); 286static u64 eth_dmamask = DMA_BIT_MASK(32);
287static struct at91_eth_data eth_data; 287static struct macb_platform_data eth_data;
288 288
289static struct resource eth_resources[] = { 289static struct resource eth_resources[] = {
290 [0] = { 290 [0] = {
@@ -311,7 +311,7 @@ static struct platform_device at91sam9g45_eth_device = {
311 .num_resources = ARRAY_SIZE(eth_resources), 311 .num_resources = ARRAY_SIZE(eth_resources),
312}; 312};
313 313
314void __init at91_add_device_eth(struct at91_eth_data *data) 314void __init at91_add_device_eth(struct macb_platform_data *data)
315{ 315{
316 if (!data) 316 if (!data)
317 return; 317 return;
@@ -348,7 +348,7 @@ void __init at91_add_device_eth(struct at91_eth_data *data)
348 platform_device_register(&at91sam9g45_eth_device); 348 platform_device_register(&at91sam9g45_eth_device);
349} 349}
350#else 350#else
351void __init at91_add_device_eth(struct at91_eth_data *data) {} 351void __init at91_add_device_eth(struct macb_platform_data *data) {}
352#endif 352#endif
353 353
354 354
@@ -1332,8 +1332,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
1332#if defined(CONFIG_SERIAL_ATMEL) 1332#if defined(CONFIG_SERIAL_ATMEL)
1333static struct resource dbgu_resources[] = { 1333static struct resource dbgu_resources[] = {
1334 [0] = { 1334 [0] = {
1335 .start = AT91_VA_BASE_SYS + AT91_DBGU, 1335 .start = AT91_BASE_SYS + AT91_DBGU,
1336 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, 1336 .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
1337 .flags = IORESOURCE_MEM, 1337 .flags = IORESOURCE_MEM,
1338 }, 1338 },
1339 [1] = { 1339 [1] = {
@@ -1346,7 +1346,6 @@ static struct resource dbgu_resources[] = {
1346static struct atmel_uart_data dbgu_data = { 1346static struct atmel_uart_data dbgu_data = {
1347 .use_dma_tx = 0, 1347 .use_dma_tx = 0,
1348 .use_dma_rx = 0, 1348 .use_dma_rx = 0,
1349 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
1350}; 1349};
1351 1350
1352static u64 dbgu_dmamask = DMA_BIT_MASK(32); 1351static u64 dbgu_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index 305a851b5bff..628eb566d60c 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -75,7 +75,7 @@ void __init at91_add_device_hdmac(void) {}
75 * USB HS Device (Gadget) 75 * USB HS Device (Gadget)
76 * -------------------------------------------------------------------- */ 76 * -------------------------------------------------------------------- */
77 77
78#if defined(CONFIG_USB_GADGET_ATMEL_USBA) || defined(CONFIG_USB_GADGET_ATMEL_USBA_MODULE) 78#if defined(CONFIG_USB_ATMEL_USBA) || defined(CONFIG_USB_ATMEL_USBA_MODULE)
79 79
80static struct resource usba_udc_resources[] = { 80static struct resource usba_udc_resources[] = {
81 [0] = { 81 [0] = {
@@ -908,8 +908,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
908#if defined(CONFIG_SERIAL_ATMEL) 908#if defined(CONFIG_SERIAL_ATMEL)
909static struct resource dbgu_resources[] = { 909static struct resource dbgu_resources[] = {
910 [0] = { 910 [0] = {
911 .start = AT91_VA_BASE_SYS + AT91_DBGU, 911 .start = AT91_BASE_SYS + AT91_DBGU,
912 .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, 912 .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
913 .flags = IORESOURCE_MEM, 913 .flags = IORESOURCE_MEM,
914 }, 914 },
915 [1] = { 915 [1] = {
@@ -922,7 +922,6 @@ static struct resource dbgu_resources[] = {
922static struct atmel_uart_data dbgu_data = { 922static struct atmel_uart_data dbgu_data = {
923 .use_dma_tx = 0, 923 .use_dma_tx = 0,
924 .use_dma_rx = 0, /* DBGU not capable of receive DMA */ 924 .use_dma_rx = 0, /* DBGU not capable of receive DMA */
925 .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
926}; 925};
927 926
928static u64 dbgu_dmamask = DMA_BIT_MASK(32); 927static u64 dbgu_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/arm/mach-at91/board-1arm.c b/arch/arm/mach-at91/board-1arm.c
index 367d5cd5e362..a60d98d7c3e2 100644
--- a/arch/arm/mach-at91/board-1arm.c
+++ b/arch/arm/mach-at91/board-1arm.c
@@ -63,7 +63,7 @@ static void __init onearm_init_early(void)
63 at91_set_serial_console(0); 63 at91_set_serial_console(0);
64} 64}
65 65
66static struct at91_eth_data __initdata onearm_eth_data = { 66static struct macb_platform_data __initdata onearm_eth_data = {
67 .phy_irq_pin = AT91_PIN_PC4, 67 .phy_irq_pin = AT91_PIN_PC4,
68 .is_rmii = 1, 68 .is_rmii = 1,
69}; 69};
diff --git a/arch/arm/mach-at91/board-afeb-9260v1.c b/arch/arm/mach-at91/board-afeb-9260v1.c
index 0487ea10c2d6..17fc77925707 100644
--- a/arch/arm/mach-at91/board-afeb-9260v1.c
+++ b/arch/arm/mach-at91/board-afeb-9260v1.c
@@ -103,7 +103,7 @@ static struct spi_board_info afeb9260_spi_devices[] = {
103/* 103/*
104 * MACB Ethernet device 104 * MACB Ethernet device
105 */ 105 */
106static struct at91_eth_data __initdata afeb9260_macb_data = { 106static struct macb_platform_data __initdata afeb9260_macb_data = {
107 .phy_irq_pin = AT91_PIN_PA9, 107 .phy_irq_pin = AT91_PIN_PA9,
108 .is_rmii = 0, 108 .is_rmii = 0,
109}; 109};
@@ -130,19 +130,14 @@ static struct mtd_partition __initdata afeb9260_nand_partition[] = {
130 }, 130 },
131}; 131};
132 132
133static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
134{
135 *num_partitions = ARRAY_SIZE(afeb9260_nand_partition);
136 return afeb9260_nand_partition;
137}
138
139static struct atmel_nand_data __initdata afeb9260_nand_data = { 133static struct atmel_nand_data __initdata afeb9260_nand_data = {
140 .ale = 21, 134 .ale = 21,
141 .cle = 22, 135 .cle = 22,
142 .rdy_pin = AT91_PIN_PC13, 136 .rdy_pin = AT91_PIN_PC13,
143 .enable_pin = AT91_PIN_PC14, 137 .enable_pin = AT91_PIN_PC14,
144 .partition_info = nand_partitions,
145 .bus_width_16 = 0, 138 .bus_width_16 = 0,
139 .parts = afeb9260_nand_partition,
140 .num_parts = ARRAY_SIZE(afeb9260_nand_partition),
146}; 141};
147 142
148 143
diff --git a/arch/arm/mach-at91/board-cam60.c b/arch/arm/mach-at91/board-cam60.c
index 747b2eaa9737..2037d2c40eb4 100644
--- a/arch/arm/mach-at91/board-cam60.c
+++ b/arch/arm/mach-at91/board-cam60.c
@@ -115,7 +115,7 @@ static struct spi_board_info cam60_spi_devices[] __initdata = {
115/* 115/*
116 * MACB Ethernet device 116 * MACB Ethernet device
117 */ 117 */
118static struct __initdata at91_eth_data cam60_macb_data = { 118static struct __initdata macb_platform_data cam60_macb_data = {
119 .phy_irq_pin = AT91_PIN_PB5, 119 .phy_irq_pin = AT91_PIN_PB5,
120 .is_rmii = 0, 120 .is_rmii = 0,
121}; 121};
@@ -132,19 +132,14 @@ static struct mtd_partition __initdata cam60_nand_partition[] = {
132 }, 132 },
133}; 133};
134 134
135static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
136{
137 *num_partitions = ARRAY_SIZE(cam60_nand_partition);
138 return cam60_nand_partition;
139}
140
141static struct atmel_nand_data __initdata cam60_nand_data = { 135static struct atmel_nand_data __initdata cam60_nand_data = {
142 .ale = 21, 136 .ale = 21,
143 .cle = 22, 137 .cle = 22,
144 // .det_pin = ... not there 138 // .det_pin = ... not there
145 .rdy_pin = AT91_PIN_PA9, 139 .rdy_pin = AT91_PIN_PA9,
146 .enable_pin = AT91_PIN_PA7, 140 .enable_pin = AT91_PIN_PA7,
147 .partition_info = nand_partitions, 141 .parts = cam60_nand_partition,
142 .num_parts = ARRAY_SIZE(cam60_nand_partition),
148}; 143};
149 144
150static struct sam9_smc_config __initdata cam60_nand_smc_config = { 145static struct sam9_smc_config __initdata cam60_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-cap9adk.c b/arch/arm/mach-at91/board-cap9adk.c
index 062670351a6a..af5520c366fe 100644
--- a/arch/arm/mach-at91/board-cap9adk.c
+++ b/arch/arm/mach-at91/board-cap9adk.c
@@ -153,7 +153,7 @@ static struct at91_mmc_data __initdata cap9adk_mmc_data = {
153/* 153/*
154 * MACB Ethernet device 154 * MACB Ethernet device
155 */ 155 */
156static struct at91_eth_data __initdata cap9adk_macb_data = { 156static struct macb_platform_data __initdata cap9adk_macb_data = {
157 .is_rmii = 1, 157 .is_rmii = 1,
158}; 158};
159 159
@@ -169,19 +169,14 @@ static struct mtd_partition __initdata cap9adk_nand_partitions[] = {
169 }, 169 },
170}; 170};
171 171
172static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
173{
174 *num_partitions = ARRAY_SIZE(cap9adk_nand_partitions);
175 return cap9adk_nand_partitions;
176}
177
178static struct atmel_nand_data __initdata cap9adk_nand_data = { 172static struct atmel_nand_data __initdata cap9adk_nand_data = {
179 .ale = 21, 173 .ale = 21,
180 .cle = 22, 174 .cle = 22,
181// .det_pin = ... not connected 175// .det_pin = ... not connected
182// .rdy_pin = ... not connected 176// .rdy_pin = ... not connected
183 .enable_pin = AT91_PIN_PD15, 177 .enable_pin = AT91_PIN_PD15,
184 .partition_info = nand_partitions, 178 .parts = cap9adk_nand_partitions,
179 .num_parts = ARRAY_SIZE(cap9adk_nand_partitions),
185}; 180};
186 181
187static struct sam9_smc_config __initdata cap9adk_nand_smc_config = { 182static struct sam9_smc_config __initdata cap9adk_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-carmeva.c b/arch/arm/mach-at91/board-carmeva.c
index 774c87fcbd5b..529b356cdb7d 100644
--- a/arch/arm/mach-at91/board-carmeva.c
+++ b/arch/arm/mach-at91/board-carmeva.c
@@ -57,7 +57,7 @@ static void __init carmeva_init_early(void)
57 at91_set_serial_console(0); 57 at91_set_serial_console(0);
58} 58}
59 59
60static struct at91_eth_data __initdata carmeva_eth_data = { 60static struct macb_platform_data __initdata carmeva_eth_data = {
61 .phy_irq_pin = AT91_PIN_PC4, 61 .phy_irq_pin = AT91_PIN_PC4,
62 .is_rmii = 1, 62 .is_rmii = 1,
63}; 63};
diff --git a/arch/arm/mach-at91/board-cpu9krea.c b/arch/arm/mach-at91/board-cpu9krea.c
index fc885a4ce243..04d2b9b50464 100644
--- a/arch/arm/mach-at91/board-cpu9krea.c
+++ b/arch/arm/mach-at91/board-cpu9krea.c
@@ -99,7 +99,7 @@ static struct at91_udc_data __initdata cpu9krea_udc_data = {
99/* 99/*
100 * MACB Ethernet device 100 * MACB Ethernet device
101 */ 101 */
102static struct at91_eth_data __initdata cpu9krea_macb_data = { 102static struct macb_platform_data __initdata cpu9krea_macb_data = {
103 .is_rmii = 1, 103 .is_rmii = 1,
104}; 104};
105 105
diff --git a/arch/arm/mach-at91/board-cpuat91.c b/arch/arm/mach-at91/board-cpuat91.c
index d35e65b08ccd..7a4c82e8da51 100644
--- a/arch/arm/mach-at91/board-cpuat91.c
+++ b/arch/arm/mach-at91/board-cpuat91.c
@@ -82,7 +82,7 @@ static void __init cpuat91_init_early(void)
82 at91_set_serial_console(0); 82 at91_set_serial_console(0);
83} 83}
84 84
85static struct at91_eth_data __initdata cpuat91_eth_data = { 85static struct macb_platform_data __initdata cpuat91_eth_data = {
86 .is_rmii = 1, 86 .is_rmii = 1,
87}; 87};
88 88
diff --git a/arch/arm/mach-at91/board-csb337.c b/arch/arm/mach-at91/board-csb337.c
index c3936665e645..b004b20b8e42 100644
--- a/arch/arm/mach-at91/board-csb337.c
+++ b/arch/arm/mach-at91/board-csb337.c
@@ -58,7 +58,7 @@ static void __init csb337_init_early(void)
58 at91_set_serial_console(0); 58 at91_set_serial_console(0);
59} 59}
60 60
61static struct at91_eth_data __initdata csb337_eth_data = { 61static struct macb_platform_data __initdata csb337_eth_data = {
62 .phy_irq_pin = AT91_PIN_PC2, 62 .phy_irq_pin = AT91_PIN_PC2,
63 .is_rmii = 0, 63 .is_rmii = 0,
64}; 64};
diff --git a/arch/arm/mach-at91/board-csb637.c b/arch/arm/mach-at91/board-csb637.c
index 586100e2acbb..e966de5219c7 100644
--- a/arch/arm/mach-at91/board-csb637.c
+++ b/arch/arm/mach-at91/board-csb637.c
@@ -52,7 +52,7 @@ static void __init csb637_init_early(void)
52 at91_set_serial_console(0); 52 at91_set_serial_console(0);
53} 53}
54 54
55static struct at91_eth_data __initdata csb637_eth_data = { 55static struct macb_platform_data __initdata csb637_eth_data = {
56 .phy_irq_pin = AT91_PIN_PC0, 56 .phy_irq_pin = AT91_PIN_PC0,
57 .is_rmii = 0, 57 .is_rmii = 0,
58}; 58};
diff --git a/arch/arm/mach-at91/board-eb9200.c b/arch/arm/mach-at91/board-eb9200.c
index 45db7a3dbef0..3788fa527121 100644
--- a/arch/arm/mach-at91/board-eb9200.c
+++ b/arch/arm/mach-at91/board-eb9200.c
@@ -60,7 +60,7 @@ static void __init eb9200_init_early(void)
60 at91_set_serial_console(0); 60 at91_set_serial_console(0);
61} 61}
62 62
63static struct at91_eth_data __initdata eb9200_eth_data = { 63static struct macb_platform_data __initdata eb9200_eth_data = {
64 .phy_irq_pin = AT91_PIN_PC4, 64 .phy_irq_pin = AT91_PIN_PC4,
65 .is_rmii = 1, 65 .is_rmii = 1,
66}; 66};
diff --git a/arch/arm/mach-at91/board-ecbat91.c b/arch/arm/mach-at91/board-ecbat91.c
index 2f9c16d29212..af7622eae1a9 100644
--- a/arch/arm/mach-at91/board-ecbat91.c
+++ b/arch/arm/mach-at91/board-ecbat91.c
@@ -64,7 +64,7 @@ static void __init ecb_at91init_early(void)
64 at91_set_serial_console(0); 64 at91_set_serial_console(0);
65} 65}
66 66
67static struct at91_eth_data __initdata ecb_at91eth_data = { 67static struct macb_platform_data __initdata ecb_at91eth_data = {
68 .phy_irq_pin = AT91_PIN_PC4, 68 .phy_irq_pin = AT91_PIN_PC4,
69 .is_rmii = 0, 69 .is_rmii = 0,
70}; 70};
diff --git a/arch/arm/mach-at91/board-eco920.c b/arch/arm/mach-at91/board-eco920.c
index 8252c722607b..8e75867d1d18 100644
--- a/arch/arm/mach-at91/board-eco920.c
+++ b/arch/arm/mach-at91/board-eco920.c
@@ -47,7 +47,7 @@ static void __init eco920_init_early(void)
47 at91_set_serial_console(0); 47 at91_set_serial_console(0);
48} 48}
49 49
50static struct at91_eth_data __initdata eco920_eth_data = { 50static struct macb_platform_data __initdata eco920_eth_data = {
51 .phy_irq_pin = AT91_PIN_PC2, 51 .phy_irq_pin = AT91_PIN_PC2,
52 .is_rmii = 1, 52 .is_rmii = 1,
53}; 53};
diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c
index f27d1a780cfa..de8e09642f4e 100644
--- a/arch/arm/mach-at91/board-foxg20.c
+++ b/arch/arm/mach-at91/board-foxg20.c
@@ -135,7 +135,7 @@ static struct spi_board_info foxg20_spi_devices[] = {
135/* 135/*
136 * MACB Ethernet device 136 * MACB Ethernet device
137 */ 137 */
138static struct at91_eth_data __initdata foxg20_macb_data = { 138static struct macb_platform_data __initdata foxg20_macb_data = {
139 .phy_irq_pin = AT91_PIN_PA7, 139 .phy_irq_pin = AT91_PIN_PA7,
140 .is_rmii = 1, 140 .is_rmii = 1,
141}; 141};
diff --git a/arch/arm/mach-at91/board-gsia18s.c b/arch/arm/mach-at91/board-gsia18s.c
index 2e95949737e6..51c82f151119 100644
--- a/arch/arm/mach-at91/board-gsia18s.c
+++ b/arch/arm/mach-at91/board-gsia18s.c
@@ -93,7 +93,7 @@ static struct at91_udc_data __initdata udc_data = {
93/* 93/*
94 * MACB Ethernet device 94 * MACB Ethernet device
95 */ 95 */
96static struct at91_eth_data __initdata macb_data = { 96static struct macb_platform_data __initdata macb_data = {
97 .phy_irq_pin = AT91_PIN_PA28, 97 .phy_irq_pin = AT91_PIN_PA28,
98 .is_rmii = 1, 98 .is_rmii = 1,
99}; 99};
diff --git a/arch/arm/mach-at91/board-kafa.c b/arch/arm/mach-at91/board-kafa.c
index 3bae73e63633..9628a3defcf4 100644
--- a/arch/arm/mach-at91/board-kafa.c
+++ b/arch/arm/mach-at91/board-kafa.c
@@ -61,7 +61,7 @@ static void __init kafa_init_early(void)
61 at91_set_serial_console(0); 61 at91_set_serial_console(0);
62} 62}
63 63
64static struct at91_eth_data __initdata kafa_eth_data = { 64static struct macb_platform_data __initdata kafa_eth_data = {
65 .phy_irq_pin = AT91_PIN_PC4, 65 .phy_irq_pin = AT91_PIN_PC4,
66 .is_rmii = 0, 66 .is_rmii = 0,
67}; 67};
diff --git a/arch/arm/mach-at91/board-kb9202.c b/arch/arm/mach-at91/board-kb9202.c
index 15a3f1a87ab0..5ba5244cb632 100644
--- a/arch/arm/mach-at91/board-kb9202.c
+++ b/arch/arm/mach-at91/board-kb9202.c
@@ -69,7 +69,7 @@ static void __init kb9202_init_early(void)
69 at91_set_serial_console(0); 69 at91_set_serial_console(0);
70} 70}
71 71
72static struct at91_eth_data __initdata kb9202_eth_data = { 72static struct macb_platform_data __initdata kb9202_eth_data = {
73 .phy_irq_pin = AT91_PIN_PB29, 73 .phy_irq_pin = AT91_PIN_PB29,
74 .is_rmii = 0, 74 .is_rmii = 0,
75}; 75};
@@ -97,19 +97,14 @@ static struct mtd_partition __initdata kb9202_nand_partition[] = {
97 }, 97 },
98}; 98};
99 99
100static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
101{
102 *num_partitions = ARRAY_SIZE(kb9202_nand_partition);
103 return kb9202_nand_partition;
104}
105
106static struct atmel_nand_data __initdata kb9202_nand_data = { 100static struct atmel_nand_data __initdata kb9202_nand_data = {
107 .ale = 22, 101 .ale = 22,
108 .cle = 21, 102 .cle = 21,
109 // .det_pin = ... not there 103 // .det_pin = ... not there
110 .rdy_pin = AT91_PIN_PC29, 104 .rdy_pin = AT91_PIN_PC29,
111 .enable_pin = AT91_PIN_PC28, 105 .enable_pin = AT91_PIN_PC28,
112 .partition_info = nand_partitions, 106 .parts = kb9202_nand_partition,
107 .num_parts = ARRAY_SIZE(kb9202_nand_partition),
113}; 108};
114 109
115static void __init kb9202_board_init(void) 110static void __init kb9202_board_init(void)
diff --git a/arch/arm/mach-at91/board-neocore926.c b/arch/arm/mach-at91/board-neocore926.c
index 6094496f7edb..56e7aee11b59 100644
--- a/arch/arm/mach-at91/board-neocore926.c
+++ b/arch/arm/mach-at91/board-neocore926.c
@@ -155,7 +155,7 @@ static struct at91_mmc_data __initdata neocore926_mmc_data = {
155/* 155/*
156 * MACB Ethernet device 156 * MACB Ethernet device
157 */ 157 */
158static struct at91_eth_data __initdata neocore926_macb_data = { 158static struct macb_platform_data __initdata neocore926_macb_data = {
159 .phy_irq_pin = AT91_PIN_PE31, 159 .phy_irq_pin = AT91_PIN_PE31,
160 .is_rmii = 1, 160 .is_rmii = 1,
161}; 161};
@@ -182,19 +182,14 @@ static struct mtd_partition __initdata neocore926_nand_partition[] = {
182 }, 182 },
183}; 183};
184 184
185static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
186{
187 *num_partitions = ARRAY_SIZE(neocore926_nand_partition);
188 return neocore926_nand_partition;
189}
190
191static struct atmel_nand_data __initdata neocore926_nand_data = { 185static struct atmel_nand_data __initdata neocore926_nand_data = {
192 .ale = 21, 186 .ale = 21,
193 .cle = 22, 187 .cle = 22,
194 .rdy_pin = AT91_PIN_PB19, 188 .rdy_pin = AT91_PIN_PB19,
195 .rdy_pin_active_low = 1, 189 .rdy_pin_active_low = 1,
196 .enable_pin = AT91_PIN_PD15, 190 .enable_pin = AT91_PIN_PD15,
197 .partition_info = nand_partitions, 191 .parts = neocore926_nand_partition,
192 .num_parts = ARRAY_SIZE(neocore926_nand_partition),
198}; 193};
199 194
200static struct sam9_smc_config __initdata neocore926_nand_smc_config = { 195static struct sam9_smc_config __initdata neocore926_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-pcontrol-g20.c b/arch/arm/mach-at91/board-pcontrol-g20.c
index 49e3f699b48e..c545a3e635a4 100644
--- a/arch/arm/mach-at91/board-pcontrol-g20.c
+++ b/arch/arm/mach-at91/board-pcontrol-g20.c
@@ -122,7 +122,7 @@ static struct at91_udc_data __initdata pcontrol_g20_udc_data = {
122/* 122/*
123 * MACB Ethernet device 123 * MACB Ethernet device
124 */ 124 */
125static struct at91_eth_data __initdata macb_data = { 125static struct macb_platform_data __initdata macb_data = {
126 .phy_irq_pin = AT91_PIN_PA28, 126 .phy_irq_pin = AT91_PIN_PA28,
127 .is_rmii = 1, 127 .is_rmii = 1,
128}; 128};
diff --git a/arch/arm/mach-at91/board-picotux200.c b/arch/arm/mach-at91/board-picotux200.c
index 0a8fe6a1b7c8..dc18759a24b4 100644
--- a/arch/arm/mach-at91/board-picotux200.c
+++ b/arch/arm/mach-at91/board-picotux200.c
@@ -60,7 +60,7 @@ static void __init picotux200_init_early(void)
60 at91_set_serial_console(0); 60 at91_set_serial_console(0);
61} 61}
62 62
63static struct at91_eth_data __initdata picotux200_eth_data = { 63static struct macb_platform_data __initdata picotux200_eth_data = {
64 .phy_irq_pin = AT91_PIN_PC4, 64 .phy_irq_pin = AT91_PIN_PC4,
65 .is_rmii = 1, 65 .is_rmii = 1,
66}; 66};
diff --git a/arch/arm/mach-at91/board-qil-a9260.c b/arch/arm/mach-at91/board-qil-a9260.c
index 938cc390bea3..5444d6ac514a 100644
--- a/arch/arm/mach-at91/board-qil-a9260.c
+++ b/arch/arm/mach-at91/board-qil-a9260.c
@@ -104,7 +104,7 @@ static struct spi_board_info ek_spi_devices[] = {
104/* 104/*
105 * MACB Ethernet device 105 * MACB Ethernet device
106 */ 106 */
107static struct at91_eth_data __initdata ek_macb_data = { 107static struct macb_platform_data __initdata ek_macb_data = {
108 .phy_irq_pin = AT91_PIN_PA31, 108 .phy_irq_pin = AT91_PIN_PA31,
109 .is_rmii = 1, 109 .is_rmii = 1,
110}; 110};
@@ -130,19 +130,14 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
130 }, 130 },
131}; 131};
132 132
133static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
134{
135 *num_partitions = ARRAY_SIZE(ek_nand_partition);
136 return ek_nand_partition;
137}
138
139static struct atmel_nand_data __initdata ek_nand_data = { 133static struct atmel_nand_data __initdata ek_nand_data = {
140 .ale = 21, 134 .ale = 21,
141 .cle = 22, 135 .cle = 22,
142// .det_pin = ... not connected 136// .det_pin = ... not connected
143 .rdy_pin = AT91_PIN_PC13, 137 .rdy_pin = AT91_PIN_PC13,
144 .enable_pin = AT91_PIN_PC14, 138 .enable_pin = AT91_PIN_PC14,
145 .partition_info = nand_partitions, 139 .parts = ek_nand_partition,
140 .num_parts = ARRAY_SIZE(ek_nand_partition),
146}; 141};
147 142
148static struct sam9_smc_config __initdata ek_nand_smc_config = { 143static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-rm9200dk.c b/arch/arm/mach-at91/board-rm9200dk.c
index b4ac30e38a9e..022d0cebda9d 100644
--- a/arch/arm/mach-at91/board-rm9200dk.c
+++ b/arch/arm/mach-at91/board-rm9200dk.c
@@ -65,7 +65,7 @@ static void __init dk_init_early(void)
65 at91_set_serial_console(0); 65 at91_set_serial_console(0);
66} 66}
67 67
68static struct at91_eth_data __initdata dk_eth_data = { 68static struct macb_platform_data __initdata dk_eth_data = {
69 .phy_irq_pin = AT91_PIN_PC4, 69 .phy_irq_pin = AT91_PIN_PC4,
70 .is_rmii = 1, 70 .is_rmii = 1,
71}; 71};
@@ -138,19 +138,14 @@ static struct mtd_partition __initdata dk_nand_partition[] = {
138 }, 138 },
139}; 139};
140 140
141static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
142{
143 *num_partitions = ARRAY_SIZE(dk_nand_partition);
144 return dk_nand_partition;
145}
146
147static struct atmel_nand_data __initdata dk_nand_data = { 141static struct atmel_nand_data __initdata dk_nand_data = {
148 .ale = 22, 142 .ale = 22,
149 .cle = 21, 143 .cle = 21,
150 .det_pin = AT91_PIN_PB1, 144 .det_pin = AT91_PIN_PB1,
151 .rdy_pin = AT91_PIN_PC2, 145 .rdy_pin = AT91_PIN_PC2,
152 // .enable_pin = ... not there 146 // .enable_pin = ... not there
153 .partition_info = nand_partitions, 147 .parts = dk_nand_partition,
148 .num_parts = ARRAY_SIZE(dk_nand_partition),
154}; 149};
155 150
156#define DK_FLASH_BASE AT91_CHIPSELECT_0 151#define DK_FLASH_BASE AT91_CHIPSELECT_0
diff --git a/arch/arm/mach-at91/board-rm9200ek.c b/arch/arm/mach-at91/board-rm9200ek.c
index 99fd7f8aee0e..ed275861adef 100644
--- a/arch/arm/mach-at91/board-rm9200ek.c
+++ b/arch/arm/mach-at91/board-rm9200ek.c
@@ -65,7 +65,7 @@ static void __init ek_init_early(void)
65 at91_set_serial_console(0); 65 at91_set_serial_console(0);
66} 66}
67 67
68static struct at91_eth_data __initdata ek_eth_data = { 68static struct macb_platform_data __initdata ek_eth_data = {
69 .phy_irq_pin = AT91_PIN_PC4, 69 .phy_irq_pin = AT91_PIN_PC4,
70 .is_rmii = 1, 70 .is_rmii = 1,
71}; 71};
diff --git a/arch/arm/mach-at91/board-rsi-ews.c b/arch/arm/mach-at91/board-rsi-ews.c
index e927df0175df..ed3b21f77674 100644
--- a/arch/arm/mach-at91/board-rsi-ews.c
+++ b/arch/arm/mach-at91/board-rsi-ews.c
@@ -60,7 +60,7 @@ static void __init rsi_ews_init_early(void)
60/* 60/*
61 * Ethernet 61 * Ethernet
62 */ 62 */
63static struct at91_eth_data rsi_ews_eth_data __initdata = { 63static struct macb_platform_data rsi_ews_eth_data __initdata = {
64 .phy_irq_pin = AT91_PIN_PC4, 64 .phy_irq_pin = AT91_PIN_PC4,
65 .is_rmii = 1, 65 .is_rmii = 1,
66}; 66};
diff --git a/arch/arm/mach-at91/board-sam9-l9260.c b/arch/arm/mach-at91/board-sam9-l9260.c
index 2a21e790250e..3e4b50e6f6ab 100644
--- a/arch/arm/mach-at91/board-sam9-l9260.c
+++ b/arch/arm/mach-at91/board-sam9-l9260.c
@@ -109,7 +109,7 @@ static struct spi_board_info ek_spi_devices[] = {
109/* 109/*
110 * MACB Ethernet device 110 * MACB Ethernet device
111 */ 111 */
112static struct at91_eth_data __initdata ek_macb_data = { 112static struct macb_platform_data __initdata ek_macb_data = {
113 .phy_irq_pin = AT91_PIN_PA7, 113 .phy_irq_pin = AT91_PIN_PA7,
114 .is_rmii = 0, 114 .is_rmii = 0,
115}; 115};
@@ -131,19 +131,14 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
131 }, 131 },
132}; 132};
133 133
134static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
135{
136 *num_partitions = ARRAY_SIZE(ek_nand_partition);
137 return ek_nand_partition;
138}
139
140static struct atmel_nand_data __initdata ek_nand_data = { 134static struct atmel_nand_data __initdata ek_nand_data = {
141 .ale = 21, 135 .ale = 21,
142 .cle = 22, 136 .cle = 22,
143// .det_pin = ... not connected 137// .det_pin = ... not connected
144 .rdy_pin = AT91_PIN_PC13, 138 .rdy_pin = AT91_PIN_PC13,
145 .enable_pin = AT91_PIN_PC14, 139 .enable_pin = AT91_PIN_PC14,
146 .partition_info = nand_partitions, 140 .parts = ek_nand_partition,
141 .num_parts = ARRAY_SIZE(ek_nand_partition),
147}; 142};
148 143
149static struct sam9_smc_config __initdata ek_nand_smc_config = { 144static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c
index 89c8b579bfda..13478e14a543 100644
--- a/arch/arm/mach-at91/board-sam9260ek.c
+++ b/arch/arm/mach-at91/board-sam9260ek.c
@@ -151,7 +151,7 @@ static struct spi_board_info ek_spi_devices[] = {
151/* 151/*
152 * MACB Ethernet device 152 * MACB Ethernet device
153 */ 153 */
154static struct at91_eth_data __initdata ek_macb_data = { 154static struct macb_platform_data __initdata ek_macb_data = {
155 .phy_irq_pin = AT91_PIN_PA7, 155 .phy_irq_pin = AT91_PIN_PA7,
156 .is_rmii = 1, 156 .is_rmii = 1,
157}; 157};
@@ -173,19 +173,14 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
173 }, 173 },
174}; 174};
175 175
176static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
177{
178 *num_partitions = ARRAY_SIZE(ek_nand_partition);
179 return ek_nand_partition;
180}
181
182static struct atmel_nand_data __initdata ek_nand_data = { 176static struct atmel_nand_data __initdata ek_nand_data = {
183 .ale = 21, 177 .ale = 21,
184 .cle = 22, 178 .cle = 22,
185// .det_pin = ... not connected 179// .det_pin = ... not connected
186 .rdy_pin = AT91_PIN_PC13, 180 .rdy_pin = AT91_PIN_PC13,
187 .enable_pin = AT91_PIN_PC14, 181 .enable_pin = AT91_PIN_PC14,
188 .partition_info = nand_partitions, 182 .parts = ek_nand_partition,
183 .num_parts = ARRAY_SIZE(ek_nand_partition),
189}; 184};
190 185
191static struct sam9_smc_config __initdata ek_nand_smc_config = { 186static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index 3741f43cdae9..b005b738e8ff 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -179,19 +179,14 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
179 }, 179 },
180}; 180};
181 181
182static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
183{
184 *num_partitions = ARRAY_SIZE(ek_nand_partition);
185 return ek_nand_partition;
186}
187
188static struct atmel_nand_data __initdata ek_nand_data = { 182static struct atmel_nand_data __initdata ek_nand_data = {
189 .ale = 22, 183 .ale = 22,
190 .cle = 21, 184 .cle = 21,
191// .det_pin = ... not connected 185// .det_pin = ... not connected
192 .rdy_pin = AT91_PIN_PC15, 186 .rdy_pin = AT91_PIN_PC15,
193 .enable_pin = AT91_PIN_PC14, 187 .enable_pin = AT91_PIN_PC14,
194 .partition_info = nand_partitions, 188 .parts = ek_nand_partition,
189 .num_parts = ARRAY_SIZE(ek_nand_partition),
195}; 190};
196 191
197static struct sam9_smc_config __initdata ek_nand_smc_config = { 192static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index a580dd451a41..fcf194e6e4fe 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -158,7 +158,7 @@ static struct at91_mmc_data __initdata ek_mmc_data = {
158/* 158/*
159 * MACB Ethernet device 159 * MACB Ethernet device
160 */ 160 */
161static struct at91_eth_data __initdata ek_macb_data = { 161static struct macb_platform_data __initdata ek_macb_data = {
162 .phy_irq_pin = AT91_PIN_PE31, 162 .phy_irq_pin = AT91_PIN_PE31,
163 .is_rmii = 1, 163 .is_rmii = 1,
164}; 164};
@@ -180,19 +180,14 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
180 }, 180 },
181}; 181};
182 182
183static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
184{
185 *num_partitions = ARRAY_SIZE(ek_nand_partition);
186 return ek_nand_partition;
187}
188
189static struct atmel_nand_data __initdata ek_nand_data = { 183static struct atmel_nand_data __initdata ek_nand_data = {
190 .ale = 21, 184 .ale = 21,
191 .cle = 22, 185 .cle = 22,
192// .det_pin = ... not connected 186// .det_pin = ... not connected
193 .rdy_pin = AT91_PIN_PA22, 187 .rdy_pin = AT91_PIN_PA22,
194 .enable_pin = AT91_PIN_PD15, 188 .enable_pin = AT91_PIN_PD15,
195 .partition_info = nand_partitions, 189 .parts = ek_nand_partition,
190 .num_parts = ARRAY_SIZE(ek_nand_partition),
196}; 191};
197 192
198static struct sam9_smc_config __initdata ek_nand_smc_config = { 193static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index 8d77c2ff96b2..78d27cc3cc09 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -123,7 +123,7 @@ static struct spi_board_info ek_spi_devices[] = {
123/* 123/*
124 * MACB Ethernet device 124 * MACB Ethernet device
125 */ 125 */
126static struct at91_eth_data __initdata ek_macb_data = { 126static struct macb_platform_data __initdata ek_macb_data = {
127 .phy_irq_pin = AT91_PIN_PA7, 127 .phy_irq_pin = AT91_PIN_PA7,
128 .is_rmii = 1, 128 .is_rmii = 1,
129}; 129};
@@ -157,19 +157,14 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
157 }, 157 },
158}; 158};
159 159
160static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
161{
162 *num_partitions = ARRAY_SIZE(ek_nand_partition);
163 return ek_nand_partition;
164}
165
166/* det_pin is not connected */ 160/* det_pin is not connected */
167static struct atmel_nand_data __initdata ek_nand_data = { 161static struct atmel_nand_data __initdata ek_nand_data = {
168 .ale = 21, 162 .ale = 21,
169 .cle = 22, 163 .cle = 22,
170 .rdy_pin = AT91_PIN_PC13, 164 .rdy_pin = AT91_PIN_PC13,
171 .enable_pin = AT91_PIN_PC14, 165 .enable_pin = AT91_PIN_PC14,
172 .partition_info = nand_partitions, 166 .parts = ek_nand_partition,
167 .num_parts = ARRAY_SIZE(ek_nand_partition),
173}; 168};
174 169
175static struct sam9_smc_config __initdata ek_nand_smc_config = { 170static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 2d6203ac1a42..4e1ee9d87096 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -115,7 +115,7 @@ static struct mci_platform_data __initdata mci1_data = {
115/* 115/*
116 * MACB Ethernet device 116 * MACB Ethernet device
117 */ 117 */
118static struct at91_eth_data __initdata ek_macb_data = { 118static struct macb_platform_data __initdata ek_macb_data = {
119 .phy_irq_pin = AT91_PIN_PD5, 119 .phy_irq_pin = AT91_PIN_PD5,
120 .is_rmii = 1, 120 .is_rmii = 1,
121}; 121};
@@ -137,19 +137,14 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
137 }, 137 },
138}; 138};
139 139
140static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
141{
142 *num_partitions = ARRAY_SIZE(ek_nand_partition);
143 return ek_nand_partition;
144}
145
146/* det_pin is not connected */ 140/* det_pin is not connected */
147static struct atmel_nand_data __initdata ek_nand_data = { 141static struct atmel_nand_data __initdata ek_nand_data = {
148 .ale = 21, 142 .ale = 21,
149 .cle = 22, 143 .cle = 22,
150 .rdy_pin = AT91_PIN_PC8, 144 .rdy_pin = AT91_PIN_PC8,
151 .enable_pin = AT91_PIN_PC14, 145 .enable_pin = AT91_PIN_PC14,
152 .partition_info = nand_partitions, 146 .parts = ek_nand_partition,
147 .num_parts = ARRAY_SIZE(ek_nand_partition),
153}; 148};
154 149
155static struct sam9_smc_config __initdata ek_nand_smc_config = { 150static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index 39a28effc3df..b2b748239f36 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -88,19 +88,14 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
88 }, 88 },
89}; 89};
90 90
91static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
92{
93 *num_partitions = ARRAY_SIZE(ek_nand_partition);
94 return ek_nand_partition;
95}
96
97static struct atmel_nand_data __initdata ek_nand_data = { 91static struct atmel_nand_data __initdata ek_nand_data = {
98 .ale = 21, 92 .ale = 21,
99 .cle = 22, 93 .cle = 22,
100// .det_pin = ... not connected 94// .det_pin = ... not connected
101 .rdy_pin = AT91_PIN_PD17, 95 .rdy_pin = AT91_PIN_PD17,
102 .enable_pin = AT91_PIN_PB6, 96 .enable_pin = AT91_PIN_PB6,
103 .partition_info = nand_partitions, 97 .parts = ek_nand_partition,
98 .num_parts = ARRAY_SIZE(ek_nand_partition),
104}; 99};
105 100
106static struct sam9_smc_config __initdata ek_nand_smc_config = { 101static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-snapper9260.c b/arch/arm/mach-at91/board-snapper9260.c
index c73d25e5faea..fbec934a7ce9 100644
--- a/arch/arm/mach-at91/board-snapper9260.c
+++ b/arch/arm/mach-at91/board-snapper9260.c
@@ -65,7 +65,7 @@ static struct at91_udc_data __initdata snapper9260_udc_data = {
65 .vbus_polled = 1, 65 .vbus_polled = 1,
66}; 66};
67 67
68static struct at91_eth_data snapper9260_macb_data = { 68static struct macb_platform_data snapper9260_macb_data = {
69 .is_rmii = 1, 69 .is_rmii = 1,
70}; 70};
71 71
@@ -97,18 +97,12 @@ static struct mtd_partition __initdata snapper9260_nand_partitions[] = {
97 }, 97 },
98}; 98};
99 99
100static struct mtd_partition * __init
101snapper9260_nand_partition_info(int size, int *num_partitions)
102{
103 *num_partitions = ARRAY_SIZE(snapper9260_nand_partitions);
104 return snapper9260_nand_partitions;
105}
106
107static struct atmel_nand_data __initdata snapper9260_nand_data = { 100static struct atmel_nand_data __initdata snapper9260_nand_data = {
108 .ale = 21, 101 .ale = 21,
109 .cle = 22, 102 .cle = 22,
110 .rdy_pin = AT91_PIN_PC13, 103 .rdy_pin = AT91_PIN_PC13,
111 .partition_info = snapper9260_nand_partition_info, 104 .parts = snapper9260_nand_partitions,
105 .num_parts = ARRAY_SIZE(snapper9260_nand_partitions),
112 .bus_width_16 = 0, 106 .bus_width_16 = 0,
113}; 107};
114 108
diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c
index 936e5fd7f406..7c06c07d872b 100644
--- a/arch/arm/mach-at91/board-stamp9g20.c
+++ b/arch/arm/mach-at91/board-stamp9g20.c
@@ -157,7 +157,7 @@ static struct at91_udc_data __initdata stamp9g20evb_udc_data = {
157/* 157/*
158 * MACB Ethernet device 158 * MACB Ethernet device
159 */ 159 */
160static struct at91_eth_data __initdata macb_data = { 160static struct macb_platform_data __initdata macb_data = {
161 .phy_irq_pin = AT91_PIN_PA28, 161 .phy_irq_pin = AT91_PIN_PA28,
162 .is_rmii = 1, 162 .is_rmii = 1,
163}; 163};
diff --git a/arch/arm/mach-at91/board-usb-a926x.c b/arch/arm/mach-at91/board-usb-a926x.c
index 5852d3d9890c..3d84233f78eb 100644
--- a/arch/arm/mach-at91/board-usb-a926x.c
+++ b/arch/arm/mach-at91/board-usb-a926x.c
@@ -146,7 +146,7 @@ static void __init ek_add_device_spi(void)
146/* 146/*
147 * MACB Ethernet device 147 * MACB Ethernet device
148 */ 148 */
149static struct at91_eth_data __initdata ek_macb_data = { 149static struct macb_platform_data __initdata ek_macb_data = {
150 .phy_irq_pin = AT91_PIN_PE31, 150 .phy_irq_pin = AT91_PIN_PE31,
151 .is_rmii = 1, 151 .is_rmii = 1,
152}; 152};
@@ -190,19 +190,14 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
190 } 190 }
191}; 191};
192 192
193static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
194{
195 *num_partitions = ARRAY_SIZE(ek_nand_partition);
196 return ek_nand_partition;
197}
198
199static struct atmel_nand_data __initdata ek_nand_data = { 193static struct atmel_nand_data __initdata ek_nand_data = {
200 .ale = 21, 194 .ale = 21,
201 .cle = 22, 195 .cle = 22,
202// .det_pin = ... not connected 196// .det_pin = ... not connected
203 .rdy_pin = AT91_PIN_PA22, 197 .rdy_pin = AT91_PIN_PA22,
204 .enable_pin = AT91_PIN_PD15, 198 .enable_pin = AT91_PIN_PD15,
205 .partition_info = nand_partitions, 199 .parts = ek_nand_partition,
200 .num_parts = ARRAY_SIZE(ek_nand_partition),
206}; 201};
207 202
208static struct sam9_smc_config __initdata usb_a9260_nand_smc_config = { 203static struct sam9_smc_config __initdata usb_a9260_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-yl-9200.c b/arch/arm/mach-at91/board-yl-9200.c
index 3c288b396fc4..2c40a21b2794 100644
--- a/arch/arm/mach-at91/board-yl-9200.c
+++ b/arch/arm/mach-at91/board-yl-9200.c
@@ -110,7 +110,7 @@ static struct gpio_led yl9200_leds[] = {
110/* 110/*
111 * Ethernet 111 * Ethernet
112 */ 112 */
113static struct at91_eth_data __initdata yl9200_eth_data = { 113static struct macb_platform_data __initdata yl9200_eth_data = {
114 .phy_irq_pin = AT91_PIN_PB28, 114 .phy_irq_pin = AT91_PIN_PB28,
115 .is_rmii = 1, 115 .is_rmii = 1,
116}; 116};
@@ -172,19 +172,14 @@ static struct mtd_partition __initdata yl9200_nand_partition[] = {
172 } 172 }
173}; 173};
174 174
175static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
176{
177 *num_partitions = ARRAY_SIZE(yl9200_nand_partition);
178 return yl9200_nand_partition;
179}
180
181static struct atmel_nand_data __initdata yl9200_nand_data = { 175static struct atmel_nand_data __initdata yl9200_nand_data = {
182 .ale = 6, 176 .ale = 6,
183 .cle = 7, 177 .cle = 7,
184 // .det_pin = ... not connected 178 // .det_pin = ... not connected
185 .rdy_pin = AT91_PIN_PC14, /* R/!B (Sheet10) */ 179 .rdy_pin = AT91_PIN_PC14, /* R/!B (Sheet10) */
186 .enable_pin = AT91_PIN_PC15, /* !CE (Sheet10) */ 180 .enable_pin = AT91_PIN_PC15, /* !CE (Sheet10) */
187 .partition_info = nand_partitions, 181 .parts = yl9200_nand_partition,
182 .num_parts = ARRAY_SIZE(yl9200_nand_partition),
188}; 183};
189 184
190/* 185/*
@@ -389,7 +384,7 @@ static struct spi_board_info yl9200_spi_devices[] = {
389#include <video/s1d13xxxfb.h> 384#include <video/s1d13xxxfb.h>
390 385
391 386
392static void __init yl9200_init_video(void) 387static void yl9200_init_video(void)
393{ 388{
394 /* NWAIT Signal */ 389 /* NWAIT Signal */
395 at91_set_A_periph(AT91_PIN_PC6, 0); 390 at91_set_A_periph(AT91_PIN_PC6, 0);
diff --git a/arch/arm/mach-at91/cpuidle.c b/arch/arm/mach-at91/cpuidle.c
index f474272c0eac..a851e6c98421 100644
--- a/arch/arm/mach-at91/cpuidle.c
+++ b/arch/arm/mach-at91/cpuidle.c
@@ -34,7 +34,8 @@ static struct cpuidle_driver at91_idle_driver = {
34 34
35/* Actual code that puts the SoC in different idle states */ 35/* Actual code that puts the SoC in different idle states */
36static int at91_enter_idle(struct cpuidle_device *dev, 36static int at91_enter_idle(struct cpuidle_device *dev,
37 struct cpuidle_state *state) 37 struct cpuidle_driver *drv,
38 int index)
38{ 39{
39 struct timeval before, after; 40 struct timeval before, after;
40 int idle_time; 41 int idle_time;
@@ -42,10 +43,10 @@ static int at91_enter_idle(struct cpuidle_device *dev,
42 43
43 local_irq_disable(); 44 local_irq_disable();
44 do_gettimeofday(&before); 45 do_gettimeofday(&before);
45 if (state == &dev->states[0]) 46 if (index == 0)
46 /* Wait for interrupt state */ 47 /* Wait for interrupt state */
47 cpu_do_idle(); 48 cpu_do_idle();
48 else if (state == &dev->states[1]) { 49 else if (index == 1) {
49 asm("b 1f; .align 5; 1:"); 50 asm("b 1f; .align 5; 1:");
50 asm("mcr p15, 0, r0, c7, c10, 4"); /* drain write buffer */ 51 asm("mcr p15, 0, r0, c7, c10, 4"); /* drain write buffer */
51 saved_lpr = sdram_selfrefresh_enable(); 52 saved_lpr = sdram_selfrefresh_enable();
@@ -56,34 +57,38 @@ static int at91_enter_idle(struct cpuidle_device *dev,
56 local_irq_enable(); 57 local_irq_enable();
57 idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + 58 idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
58 (after.tv_usec - before.tv_usec); 59 (after.tv_usec - before.tv_usec);
59 return idle_time; 60
61 dev->last_residency = idle_time;
62 return index;
60} 63}
61 64
62/* Initialize CPU idle by registering the idle states */ 65/* Initialize CPU idle by registering the idle states */
63static int at91_init_cpuidle(void) 66static int at91_init_cpuidle(void)
64{ 67{
65 struct cpuidle_device *device; 68 struct cpuidle_device *device;
66 69 struct cpuidle_driver *driver = &at91_idle_driver;
67 cpuidle_register_driver(&at91_idle_driver);
68 70
69 device = &per_cpu(at91_cpuidle_device, smp_processor_id()); 71 device = &per_cpu(at91_cpuidle_device, smp_processor_id());
70 device->state_count = AT91_MAX_STATES; 72 device->state_count = AT91_MAX_STATES;
73 driver->state_count = AT91_MAX_STATES;
71 74
72 /* Wait for interrupt state */ 75 /* Wait for interrupt state */
73 device->states[0].enter = at91_enter_idle; 76 driver->states[0].enter = at91_enter_idle;
74 device->states[0].exit_latency = 1; 77 driver->states[0].exit_latency = 1;
75 device->states[0].target_residency = 10000; 78 driver->states[0].target_residency = 10000;
76 device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; 79 driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
77 strcpy(device->states[0].name, "WFI"); 80 strcpy(driver->states[0].name, "WFI");
78 strcpy(device->states[0].desc, "Wait for interrupt"); 81 strcpy(driver->states[0].desc, "Wait for interrupt");
79 82
80 /* Wait for interrupt and RAM self refresh state */ 83 /* Wait for interrupt and RAM self refresh state */
81 device->states[1].enter = at91_enter_idle; 84 driver->states[1].enter = at91_enter_idle;
82 device->states[1].exit_latency = 10; 85 driver->states[1].exit_latency = 10;
83 device->states[1].target_residency = 10000; 86 driver->states[1].target_residency = 10000;
84 device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; 87 driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
85 strcpy(device->states[1].name, "RAM_SR"); 88 strcpy(driver->states[1].name, "RAM_SR");
86 strcpy(device->states[1].desc, "WFI and RAM Self Refresh"); 89 strcpy(driver->states[1].desc, "WFI and RAM Self Refresh");
90
91 cpuidle_register_driver(&at91_idle_driver);
87 92
88 if (cpuidle_register_device(device)) { 93 if (cpuidle_register_device(device)) {
89 printk(KERN_ERR "at91_init_cpuidle: Failed registering\n"); 94 printk(KERN_ERR "at91_init_cpuidle: Failed registering\n");
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index d07767f4052e..e209a2992245 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -40,6 +40,7 @@
40#include <linux/atmel-mci.h> 40#include <linux/atmel-mci.h>
41#include <sound/atmel-ac97c.h> 41#include <sound/atmel-ac97c.h>
42#include <linux/serial.h> 42#include <linux/serial.h>
43#include <linux/platform_data/macb.h>
43 44
44 /* USB Device */ 45 /* USB Device */
45struct at91_udc_data { 46struct at91_udc_data {
@@ -81,18 +82,7 @@ extern void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
81 /* atmel-mci platform config */ 82 /* atmel-mci platform config */
82extern void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data); 83extern void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data);
83 84
84 /* Ethernet (EMAC & MACB) */ 85extern void __init at91_add_device_eth(struct macb_platform_data *data);
85struct at91_eth_data {
86 u32 phy_mask;
87 u8 phy_irq_pin; /* PHY IRQ */
88 u8 is_rmii; /* using RMII interface? */
89};
90extern void __init at91_add_device_eth(struct at91_eth_data *data);
91
92#if defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91SAM9G20) || defined(CONFIG_ARCH_AT91CAP9) \
93 || defined(CONFIG_ARCH_AT91SAM9G45)
94#define eth_platform_data at91_eth_data
95#endif
96 86
97 /* USB Host */ 87 /* USB Host */
98struct at91_usbh_data { 88struct at91_usbh_data {
@@ -117,7 +107,8 @@ struct atmel_nand_data {
117 u8 ale; /* address line number connected to ALE */ 107 u8 ale; /* address line number connected to ALE */
118 u8 cle; /* address line number connected to CLE */ 108 u8 cle; /* address line number connected to CLE */
119 u8 bus_width_16; /* buswidth is 16 bit */ 109 u8 bus_width_16; /* buswidth is 16 bit */
120 struct mtd_partition* (*partition_info)(int, int*); 110 struct mtd_partition *parts;
111 unsigned int num_parts;
121}; 112};
122extern void __init at91_add_device_nand(struct atmel_nand_data *data); 113extern void __init at91_add_device_nand(struct atmel_nand_data *data);
123 114
diff --git a/arch/arm/mach-at91/include/mach/vmalloc.h b/arch/arm/mach-at91/include/mach/vmalloc.h
index 8eb459f3f5b7..8e4a1bd0ab1d 100644
--- a/arch/arm/mach-at91/include/mach/vmalloc.h
+++ b/arch/arm/mach-at91/include/mach/vmalloc.h
@@ -21,6 +21,8 @@
21#ifndef __ASM_ARCH_VMALLOC_H 21#ifndef __ASM_ARCH_VMALLOC_H
22#define __ASM_ARCH_VMALLOC_H 22#define __ASM_ARCH_VMALLOC_H
23 23
24#include <mach/hardware.h>
25
24#define VMALLOC_END (AT91_VIRT_BASE & PGDIR_MASK) 26#define VMALLOC_END (AT91_VIRT_BASE & PGDIR_MASK)
25 27
26#endif 28#endif
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index 26d94c0b555c..11c3db985285 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -377,7 +377,7 @@ static struct davinci_nand_pdata da830_evm_nand_pdata = {
377 .nr_parts = ARRAY_SIZE(da830_evm_nand_partitions), 377 .nr_parts = ARRAY_SIZE(da830_evm_nand_partitions),
378 .ecc_mode = NAND_ECC_HW, 378 .ecc_mode = NAND_ECC_HW,
379 .ecc_bits = 4, 379 .ecc_bits = 4,
380 .options = NAND_USE_FLASH_BBT, 380 .bbt_options = NAND_BBT_USE_FLASH,
381 .bbt_td = &da830_evm_nand_bbt_main_descr, 381 .bbt_td = &da830_evm_nand_bbt_main_descr,
382 .bbt_md = &da830_evm_nand_bbt_mirror_descr, 382 .bbt_md = &da830_evm_nand_bbt_mirror_descr,
383 .timing = &da830_evm_nandflash_timing, 383 .timing = &da830_evm_nandflash_timing,
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index ec21663f8ddc..1d7d24995226 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -256,7 +256,7 @@ static struct davinci_nand_pdata da850_evm_nandflash_data = {
256 .nr_parts = ARRAY_SIZE(da850_evm_nandflash_partition), 256 .nr_parts = ARRAY_SIZE(da850_evm_nandflash_partition),
257 .ecc_mode = NAND_ECC_HW, 257 .ecc_mode = NAND_ECC_HW,
258 .ecc_bits = 4, 258 .ecc_bits = 4,
259 .options = NAND_USE_FLASH_BBT, 259 .bbt_options = NAND_BBT_USE_FLASH,
260 .timing = &da850_evm_nandflash_timing, 260 .timing = &da850_evm_nandflash_timing,
261}; 261};
262 262
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 65566280b7c9..4e0e707c313d 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -77,7 +77,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
77 .parts = davinci_nand_partitions, 77 .parts = davinci_nand_partitions,
78 .nr_parts = ARRAY_SIZE(davinci_nand_partitions), 78 .nr_parts = ARRAY_SIZE(davinci_nand_partitions),
79 .ecc_mode = NAND_ECC_HW, 79 .ecc_mode = NAND_ECC_HW,
80 .options = NAND_USE_FLASH_BBT, 80 .bbt_options = NAND_BBT_USE_FLASH,
81 .ecc_bits = 4, 81 .ecc_bits = 4,
82}; 82};
83 83
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index b307470b071d..ff2d2413279a 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -74,7 +74,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
74 .parts = davinci_nand_partitions, 74 .parts = davinci_nand_partitions,
75 .nr_parts = ARRAY_SIZE(davinci_nand_partitions), 75 .nr_parts = ARRAY_SIZE(davinci_nand_partitions),
76 .ecc_mode = NAND_ECC_HW_SYNDROME, 76 .ecc_mode = NAND_ECC_HW_SYNDROME,
77 .options = NAND_USE_FLASH_BBT, 77 .bbt_options = NAND_BBT_USE_FLASH,
78}; 78};
79 79
80static struct resource davinci_nand_resources[] = { 80static struct resource davinci_nand_resources[] = {
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 04c43abcca66..1918ae711428 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -139,7 +139,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
139 .parts = davinci_nand_partitions, 139 .parts = davinci_nand_partitions,
140 .nr_parts = ARRAY_SIZE(davinci_nand_partitions), 140 .nr_parts = ARRAY_SIZE(davinci_nand_partitions),
141 .ecc_mode = NAND_ECC_HW, 141 .ecc_mode = NAND_ECC_HW,
142 .options = NAND_USE_FLASH_BBT, 142 .bbt_options = NAND_BBT_USE_FLASH,
143 .ecc_bits = 4, 143 .ecc_bits = 4,
144}; 144};
145 145
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 28fafa7819bc..0cf8abf78d33 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -151,7 +151,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = {
151 .parts = davinci_evm_nandflash_partition, 151 .parts = davinci_evm_nandflash_partition,
152 .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition), 152 .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition),
153 .ecc_mode = NAND_ECC_HW, 153 .ecc_mode = NAND_ECC_HW,
154 .options = NAND_USE_FLASH_BBT, 154 .bbt_options = NAND_BBT_USE_FLASH,
155 .timing = &davinci_evm_nandflash_timing, 155 .timing = &davinci_evm_nandflash_timing,
156}; 156};
157 157
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 6efc84cceca0..3cfff555e8f2 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -396,7 +396,8 @@ static struct davinci_nand_pdata mityomapl138_nandflash_data = {
396 .parts = mityomapl138_nandflash_partition, 396 .parts = mityomapl138_nandflash_partition,
397 .nr_parts = ARRAY_SIZE(mityomapl138_nandflash_partition), 397 .nr_parts = ARRAY_SIZE(mityomapl138_nandflash_partition),
398 .ecc_mode = NAND_ECC_HW, 398 .ecc_mode = NAND_ECC_HW,
399 .options = NAND_USE_FLASH_BBT | NAND_BUSWIDTH_16, 399 .bbt_options = NAND_BBT_USE_FLASH,
400 .options = NAND_BUSWIDTH_16,
400 .ecc_bits = 1, /* 4 bit mode is not supported with 16 bit NAND */ 401 .ecc_bits = 1, /* 4 bit mode is not supported with 16 bit NAND */
401}; 402};
402 403
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 38d6f644d8b9..e5f231aefee4 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -87,7 +87,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
87 .parts = davinci_ntosd2_nandflash_partition, 87 .parts = davinci_ntosd2_nandflash_partition,
88 .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition), 88 .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
89 .ecc_mode = NAND_ECC_HW, 89 .ecc_mode = NAND_ECC_HW,
90 .options = NAND_USE_FLASH_BBT, 90 .bbt_options = NAND_BBT_USE_FLASH,
91}; 91};
92 92
93static struct resource davinci_ntosd2_nandflash_resource[] = { 93static struct resource davinci_ntosd2_nandflash_resource[] = {
diff --git a/arch/arm/mach-davinci/board-tnetv107x-evm.c b/arch/arm/mach-davinci/board-tnetv107x-evm.c
index 90ee7b5aabdc..f69e40a29e02 100644
--- a/arch/arm/mach-davinci/board-tnetv107x-evm.c
+++ b/arch/arm/mach-davinci/board-tnetv107x-evm.c
@@ -144,7 +144,7 @@ static struct davinci_nand_pdata nand_config = {
144 .parts = nand_partitions, 144 .parts = nand_partitions,
145 .nr_parts = ARRAY_SIZE(nand_partitions), 145 .nr_parts = ARRAY_SIZE(nand_partitions),
146 .ecc_mode = NAND_ECC_HW, 146 .ecc_mode = NAND_ECC_HW,
147 .options = NAND_USE_FLASH_BBT, 147 .bbt_options = NAND_BBT_USE_FLASH,
148 .ecc_bits = 1, 148 .ecc_bits = 1,
149}; 149};
150 150
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 60d2f4871afa..a30c7c5a6d83 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -79,9 +79,11 @@ static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
79 79
80/* Actual code that puts the SoC in different idle states */ 80/* Actual code that puts the SoC in different idle states */
81static int davinci_enter_idle(struct cpuidle_device *dev, 81static int davinci_enter_idle(struct cpuidle_device *dev,
82 struct cpuidle_state *state) 82 struct cpuidle_driver *drv,
83 int index)
83{ 84{
84 struct davinci_ops *ops = cpuidle_get_statedata(state); 85 struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
86 struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
85 struct timeval before, after; 87 struct timeval before, after;
86 int idle_time; 88 int idle_time;
87 89
@@ -99,13 +101,17 @@ static int davinci_enter_idle(struct cpuidle_device *dev,
99 local_irq_enable(); 101 local_irq_enable();
100 idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + 102 idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
101 (after.tv_usec - before.tv_usec); 103 (after.tv_usec - before.tv_usec);
102 return idle_time; 104
105 dev->last_residency = idle_time;
106
107 return index;
103} 108}
104 109
105static int __init davinci_cpuidle_probe(struct platform_device *pdev) 110static int __init davinci_cpuidle_probe(struct platform_device *pdev)
106{ 111{
107 int ret; 112 int ret;
108 struct cpuidle_device *device; 113 struct cpuidle_device *device;
114 struct cpuidle_driver *driver = &davinci_idle_driver;
109 struct davinci_cpuidle_config *pdata = pdev->dev.platform_data; 115 struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
110 116
111 device = &per_cpu(davinci_cpuidle_device, smp_processor_id()); 117 device = &per_cpu(davinci_cpuidle_device, smp_processor_id());
@@ -117,32 +123,33 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
117 123
118 ddr2_reg_base = pdata->ddr2_ctlr_base; 124 ddr2_reg_base = pdata->ddr2_ctlr_base;
119 125
120 ret = cpuidle_register_driver(&davinci_idle_driver);
121 if (ret) {
122 dev_err(&pdev->dev, "failed to register driver\n");
123 return ret;
124 }
125
126 /* Wait for interrupt state */ 126 /* Wait for interrupt state */
127 device->states[0].enter = davinci_enter_idle; 127 driver->states[0].enter = davinci_enter_idle;
128 device->states[0].exit_latency = 1; 128 driver->states[0].exit_latency = 1;
129 device->states[0].target_residency = 10000; 129 driver->states[0].target_residency = 10000;
130 device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; 130 driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
131 strcpy(device->states[0].name, "WFI"); 131 strcpy(driver->states[0].name, "WFI");
132 strcpy(device->states[0].desc, "Wait for interrupt"); 132 strcpy(driver->states[0].desc, "Wait for interrupt");
133 133
134 /* Wait for interrupt and DDR self refresh state */ 134 /* Wait for interrupt and DDR self refresh state */
135 device->states[1].enter = davinci_enter_idle; 135 driver->states[1].enter = davinci_enter_idle;
136 device->states[1].exit_latency = 10; 136 driver->states[1].exit_latency = 10;
137 device->states[1].target_residency = 10000; 137 driver->states[1].target_residency = 10000;
138 device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; 138 driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
139 strcpy(device->states[1].name, "DDR SR"); 139 strcpy(driver->states[1].name, "DDR SR");
140 strcpy(device->states[1].desc, "WFI and DDR Self Refresh"); 140 strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
141 if (pdata->ddr2_pdown) 141 if (pdata->ddr2_pdown)
142 davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN; 142 davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
143 cpuidle_set_statedata(&device->states[1], &davinci_states[1]); 143 cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
144 144
145 device->state_count = DAVINCI_CPUIDLE_MAX_STATES; 145 device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
146 driver->state_count = DAVINCI_CPUIDLE_MAX_STATES;
147
148 ret = cpuidle_register_driver(&davinci_idle_driver);
149 if (ret) {
150 dev_err(&pdev->dev, "failed to register driver\n");
151 return ret;
152 }
146 153
147 ret = cpuidle_register_device(device); 154 ret = cpuidle_register_device(device);
148 if (ret) { 155 if (ret) {
diff --git a/arch/arm/mach-davinci/include/mach/nand.h b/arch/arm/mach-davinci/include/mach/nand.h
index 025151049f05..1cf555aef896 100644
--- a/arch/arm/mach-davinci/include/mach/nand.h
+++ b/arch/arm/mach-davinci/include/mach/nand.h
@@ -74,8 +74,10 @@ struct davinci_nand_pdata { /* platform_data */
74 nand_ecc_modes_t ecc_mode; 74 nand_ecc_modes_t ecc_mode;
75 u8 ecc_bits; 75 u8 ecc_bits;
76 76
77 /* e.g. NAND_BUSWIDTH_16 or NAND_USE_FLASH_BBT */ 77 /* e.g. NAND_BUSWIDTH_16 */
78 unsigned options; 78 unsigned options;
79 /* e.g. NAND_BBT_USE_FLASH */
80 unsigned bbt_options;
79 81
80 /* Main and mirror bbt descriptor overrides */ 82 /* Main and mirror bbt descriptor overrides */
81 struct nand_bbt_descr *bbt_td; 83 struct nand_bbt_descr *bbt_td;
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 1ade3c340507..8b2f1435bcac 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -116,8 +116,9 @@ static struct mtd_partition ts72xx_nand_parts[] = {
116 .mask_flags = MTD_WRITEABLE, /* force read-only */ 116 .mask_flags = MTD_WRITEABLE, /* force read-only */
117 }, { 117 }, {
118 .name = "Linux", 118 .name = "Linux",
119 .offset = MTDPART_OFS_APPEND, 119 .offset = MTDPART_OFS_RETAIN,
120 .size = 0, /* filled in later */ 120 .size = TS72XX_REDBOOT_PART_SIZE,
121 /* leave so much for last partition */
121 }, { 122 }, {
122 .name = "RedBoot", 123 .name = "RedBoot",
123 .offset = MTDPART_OFS_APPEND, 124 .offset = MTDPART_OFS_APPEND,
@@ -126,28 +127,14 @@ static struct mtd_partition ts72xx_nand_parts[] = {
126 }, 127 },
127}; 128};
128 129
129static void ts72xx_nand_set_parts(uint64_t size,
130 struct platform_nand_chip *chip)
131{
132 /* Factory TS-72xx boards only come with 32MiB or 128MiB NAND options */
133 if (size == SZ_32M || size == SZ_128M) {
134 /* Set the "Linux" partition size */
135 ts72xx_nand_parts[1].size = size - TS72XX_REDBOOT_PART_SIZE;
136
137 chip->partitions = ts72xx_nand_parts;
138 chip->nr_partitions = ARRAY_SIZE(ts72xx_nand_parts);
139 } else {
140 pr_warning("Unknown nand disk size:%lluMiB\n", size >> 20);
141 }
142}
143
144static struct platform_nand_data ts72xx_nand_data = { 130static struct platform_nand_data ts72xx_nand_data = {
145 .chip = { 131 .chip = {
146 .nr_chips = 1, 132 .nr_chips = 1,
147 .chip_offset = 0, 133 .chip_offset = 0,
148 .chip_delay = 15, 134 .chip_delay = 15,
149 .part_probe_types = ts72xx_nand_part_probes, 135 .part_probe_types = ts72xx_nand_part_probes,
150 .set_parts = ts72xx_nand_set_parts, 136 .partitions = ts72xx_nand_parts,
137 .nr_partitions = ARRAY_SIZE(ts72xx_nand_parts),
151 }, 138 },
152 .ctrl = { 139 .ctrl = {
153 .cmd_ctrl = ts72xx_nand_hwcontrol, 140 .cmd_ctrl = ts72xx_nand_hwcontrol,
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index bf7e96f2793a..35f6502144ae 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -16,7 +16,8 @@
16#include <asm/proc-fns.h> 16#include <asm/proc-fns.h>
17 17
18static int exynos4_enter_idle(struct cpuidle_device *dev, 18static int exynos4_enter_idle(struct cpuidle_device *dev,
19 struct cpuidle_state *state); 19 struct cpuidle_driver *drv,
20 int index);
20 21
21static struct cpuidle_state exynos4_cpuidle_set[] = { 22static struct cpuidle_state exynos4_cpuidle_set[] = {
22 [0] = { 23 [0] = {
@@ -37,7 +38,8 @@ static struct cpuidle_driver exynos4_idle_driver = {
37}; 38};
38 39
39static int exynos4_enter_idle(struct cpuidle_device *dev, 40static int exynos4_enter_idle(struct cpuidle_device *dev,
40 struct cpuidle_state *state) 41 struct cpuidle_driver *drv,
42 int index)
41{ 43{
42 struct timeval before, after; 44 struct timeval before, after;
43 int idle_time; 45 int idle_time;
@@ -52,29 +54,31 @@ static int exynos4_enter_idle(struct cpuidle_device *dev,
52 idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + 54 idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
53 (after.tv_usec - before.tv_usec); 55 (after.tv_usec - before.tv_usec);
54 56
55 return idle_time; 57 dev->last_residency = idle_time;
58 return index;
56} 59}
57 60
58static int __init exynos4_init_cpuidle(void) 61static int __init exynos4_init_cpuidle(void)
59{ 62{
60 int i, max_cpuidle_state, cpu_id; 63 int i, max_cpuidle_state, cpu_id;
61 struct cpuidle_device *device; 64 struct cpuidle_device *device;
62 65 struct cpuidle_driver *drv = &exynos4_idle_driver;
66
67 /* Setup cpuidle driver */
68 drv->state_count = (sizeof(exynos4_cpuidle_set) /
69 sizeof(struct cpuidle_state));
70 max_cpuidle_state = drv->state_count;
71 for (i = 0; i < max_cpuidle_state; i++) {
72 memcpy(&drv->states[i], &exynos4_cpuidle_set[i],
73 sizeof(struct cpuidle_state));
74 }
63 cpuidle_register_driver(&exynos4_idle_driver); 75 cpuidle_register_driver(&exynos4_idle_driver);
64 76
65 for_each_cpu(cpu_id, cpu_online_mask) { 77 for_each_cpu(cpu_id, cpu_online_mask) {
66 device = &per_cpu(exynos4_cpuidle_device, cpu_id); 78 device = &per_cpu(exynos4_cpuidle_device, cpu_id);
67 device->cpu = cpu_id; 79 device->cpu = cpu_id;
68 80
69 device->state_count = (sizeof(exynos4_cpuidle_set) / 81 device->state_count = drv->state_count;
70 sizeof(struct cpuidle_state));
71
72 max_cpuidle_state = device->state_count;
73
74 for (i = 0; i < max_cpuidle_state; i++) {
75 memcpy(&device->states[i], &exynos4_cpuidle_set[i],
76 sizeof(struct cpuidle_state));
77 }
78 82
79 if (cpuidle_register_device(device)) { 83 if (cpuidle_register_device(device)) {
80 printk(KERN_ERR "CPUidle register device failed\n,"); 84 printk(KERN_ERR "CPUidle register device failed\n,");
diff --git a/arch/arm/mach-imx/Makefile.boot b/arch/arm/mach-imx/Makefile.boot
index 22d85889f622..cfede5768aa0 100644
--- a/arch/arm/mach-imx/Makefile.boot
+++ b/arch/arm/mach-imx/Makefile.boot
@@ -1,22 +1,26 @@
1zreladdr-$(CONFIG_ARCH_MX1) += 0x08008000 1zreladdr-$(CONFIG_SOC_IMX1) += 0x08008000
2params_phys-$(CONFIG_ARCH_MX1) := 0x08000100 2params_phys-$(CONFIG_SOC_IMX1) := 0x08000100
3initrd_phys-$(CONFIG_ARCH_MX1) := 0x08800000 3initrd_phys-$(CONFIG_SOC_IMX1) := 0x08800000
4 4
5zreladdr-$(CONFIG_MACH_MX21) += 0xC0008000 5zreladdr-$(CONFIG_SOC_IMX21) += 0xC0008000
6params_phys-$(CONFIG_MACH_MX21) := 0xC0000100 6params_phys-$(CONFIG_SOC_IMX21) := 0xC0000100
7initrd_phys-$(CONFIG_MACH_MX21) := 0xC0800000 7initrd_phys-$(CONFIG_SOC_IMX21) := 0xC0800000
8 8
9zreladdr-$(CONFIG_ARCH_MX25) += 0x80008000 9zreladdr-$(CONFIG_SOC_IMX25) += 0x80008000
10params_phys-$(CONFIG_ARCH_MX25) := 0x80000100 10params_phys-$(CONFIG_SOC_IMX25) := 0x80000100
11initrd_phys-$(CONFIG_ARCH_MX25) := 0x80800000 11initrd_phys-$(CONFIG_SOC_IMX25) := 0x80800000
12 12
13zreladdr-$(CONFIG_MACH_MX27) += 0xA0008000 13zreladdr-$(CONFIG_SOC_IMX27) += 0xA0008000
14params_phys-$(CONFIG_MACH_MX27) := 0xA0000100 14params_phys-$(CONFIG_SOC_IMX27) := 0xA0000100
15initrd_phys-$(CONFIG_MACH_MX27) := 0xA0800000 15initrd_phys-$(CONFIG_SOC_IMX27) := 0xA0800000
16 16
17zreladdr-$(CONFIG_ARCH_MX3) += 0x80008000 17zreladdr-$(CONFIG_SOC_IMX31) += 0x80008000
18params_phys-$(CONFIG_ARCH_MX3) := 0x80000100 18params_phys-$(CONFIG_SOC_IMX31) := 0x80000100
19initrd_phys-$(CONFIG_ARCH_MX3) := 0x80800000 19initrd_phys-$(CONFIG_SOC_IMX31) := 0x80800000
20
21zreladdr-$(CONFIG_SOC_IMX35) += 0x80008000
22params_phys-$(CONFIG_SOC_IMX35) := 0x80000100
23initrd_phys-$(CONFIG_SOC_IMX35) := 0x80800000
20 24
21zreladdr-$(CONFIG_SOC_IMX6Q) += 0x10008000 25zreladdr-$(CONFIG_SOC_IMX6Q) += 0x10008000
22params_phys-$(CONFIG_SOC_IMX6Q) := 0x10000100 26params_phys-$(CONFIG_SOC_IMX6Q) := 0x10000100
diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c
index e0b926dfeced..613a1b993bff 100644
--- a/arch/arm/mach-imx/clock-imx6q.c
+++ b/arch/arm/mach-imx/clock-imx6q.c
@@ -1139,7 +1139,7 @@ static int _clk_set_rate(struct clk *clk, unsigned long rate)
1139 return -EINVAL; 1139 return -EINVAL;
1140 1140
1141 max_div = ((d->bm_pred >> d->bp_pred) + 1) * 1141 max_div = ((d->bm_pred >> d->bp_pred) + 1) *
1142 ((d->bm_pred >> d->bp_pred) + 1); 1142 ((d->bm_podf >> d->bp_podf) + 1);
1143 1143
1144 div = parent_rate / rate; 1144 div = parent_rate / rate;
1145 if (div == 0) 1145 if (div == 0)
@@ -2002,6 +2002,21 @@ int __init mx6q_clocks_init(void)
2002 clk_set_rate(&asrc_serial_clk, 1500000); 2002 clk_set_rate(&asrc_serial_clk, 1500000);
2003 clk_set_rate(&enfc_clk, 11000000); 2003 clk_set_rate(&enfc_clk, 11000000);
2004 2004
2005 /*
2006 * Before pinctrl API is available, we have to rely on the pad
2007 * configuration set up by bootloader. For usdhc example here,
2008 * u-boot sets up the pads for 49.5 MHz case, and we have to lower
2009 * the usdhc clock from 198 to 49.5 MHz to match the pad configuration.
2010 *
2011 * FIXME: This is should be removed after pinctrl API is available.
2012 * At that time, usdhc driver can call pinctrl API to change pad
2013 * configuration dynamically per different usdhc clock settings.
2014 */
2015 clk_set_rate(&usdhc1_clk, 49500000);
2016 clk_set_rate(&usdhc2_clk, 49500000);
2017 clk_set_rate(&usdhc3_clk, 49500000);
2018 clk_set_rate(&usdhc4_clk, 49500000);
2019
2005 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); 2020 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
2006 base = of_iomap(np, 0); 2021 base = of_iomap(np, 0);
2007 WARN_ON(!base); 2022 WARN_ON(!base);
diff --git a/arch/arm/mach-kirkwood/cpuidle.c b/arch/arm/mach-kirkwood/cpuidle.c
index 864e569f684e..7088180b018b 100644
--- a/arch/arm/mach-kirkwood/cpuidle.c
+++ b/arch/arm/mach-kirkwood/cpuidle.c
@@ -33,17 +33,18 @@ static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
33 33
34/* Actual code that puts the SoC in different idle states */ 34/* Actual code that puts the SoC in different idle states */
35static int kirkwood_enter_idle(struct cpuidle_device *dev, 35static int kirkwood_enter_idle(struct cpuidle_device *dev,
36 struct cpuidle_state *state) 36 struct cpuidle_driver *drv,
37 int index)
37{ 38{
38 struct timeval before, after; 39 struct timeval before, after;
39 int idle_time; 40 int idle_time;
40 41
41 local_irq_disable(); 42 local_irq_disable();
42 do_gettimeofday(&before); 43 do_gettimeofday(&before);
43 if (state == &dev->states[0]) 44 if (index == 0)
44 /* Wait for interrupt state */ 45 /* Wait for interrupt state */
45 cpu_do_idle(); 46 cpu_do_idle();
46 else if (state == &dev->states[1]) { 47 else if (index == 1) {
47 /* 48 /*
48 * Following write will put DDR in self refresh. 49 * Following write will put DDR in self refresh.
49 * Note that we have 256 cycles before DDR puts it 50 * Note that we have 256 cycles before DDR puts it
@@ -58,35 +59,40 @@ static int kirkwood_enter_idle(struct cpuidle_device *dev,
58 local_irq_enable(); 59 local_irq_enable();
59 idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + 60 idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
60 (after.tv_usec - before.tv_usec); 61 (after.tv_usec - before.tv_usec);
61 return idle_time; 62
63 /* Update last residency */
64 dev->last_residency = idle_time;
65
66 return index;
62} 67}
63 68
64/* Initialize CPU idle by registering the idle states */ 69/* Initialize CPU idle by registering the idle states */
65static int kirkwood_init_cpuidle(void) 70static int kirkwood_init_cpuidle(void)
66{ 71{
67 struct cpuidle_device *device; 72 struct cpuidle_device *device;
68 73 struct cpuidle_driver *driver = &kirkwood_idle_driver;
69 cpuidle_register_driver(&kirkwood_idle_driver);
70 74
71 device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id()); 75 device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
72 device->state_count = KIRKWOOD_MAX_STATES; 76 device->state_count = KIRKWOOD_MAX_STATES;
77 driver->state_count = KIRKWOOD_MAX_STATES;
73 78
74 /* Wait for interrupt state */ 79 /* Wait for interrupt state */
75 device->states[0].enter = kirkwood_enter_idle; 80 driver->states[0].enter = kirkwood_enter_idle;
76 device->states[0].exit_latency = 1; 81 driver->states[0].exit_latency = 1;
77 device->states[0].target_residency = 10000; 82 driver->states[0].target_residency = 10000;
78 device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; 83 driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
79 strcpy(device->states[0].name, "WFI"); 84 strcpy(driver->states[0].name, "WFI");
80 strcpy(device->states[0].desc, "Wait for interrupt"); 85 strcpy(driver->states[0].desc, "Wait for interrupt");
81 86
82 /* Wait for interrupt and DDR self refresh state */ 87 /* Wait for interrupt and DDR self refresh state */
83 device->states[1].enter = kirkwood_enter_idle; 88 driver->states[1].enter = kirkwood_enter_idle;
84 device->states[1].exit_latency = 10; 89 driver->states[1].exit_latency = 10;
85 device->states[1].target_residency = 10000; 90 driver->states[1].target_residency = 10000;
86 device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; 91 driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
87 strcpy(device->states[1].name, "DDR SR"); 92 strcpy(driver->states[1].name, "DDR SR");
88 strcpy(device->states[1].desc, "WFI and DDR Self Refresh"); 93 strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
89 94
95 cpuidle_register_driver(&kirkwood_idle_driver);
90 if (cpuidle_register_device(device)) { 96 if (cpuidle_register_device(device)) {
91 printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n"); 97 printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n");
92 return -EIO; 98 return -EIO;
diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c
index edcbadad31c1..f0d236dfb02b 100644
--- a/arch/arm/mach-mmp/aspenite.c
+++ b/arch/arm/mach-mmp/aspenite.c
@@ -167,8 +167,9 @@ static struct mtd_partition aspenite_nand_partitions[] = {
167 167
168static struct pxa3xx_nand_platform_data aspenite_nand_info = { 168static struct pxa3xx_nand_platform_data aspenite_nand_info = {
169 .enable_arbiter = 1, 169 .enable_arbiter = 1,
170 .parts = aspenite_nand_partitions, 170 .num_cs = 1,
171 .nr_parts = ARRAY_SIZE(aspenite_nand_partitions), 171 .parts[0] = aspenite_nand_partitions,
172 .nr_parts[0] = ARRAY_SIZE(aspenite_nand_partitions),
172}; 173};
173 174
174static struct i2c_board_info aspenite_i2c_info[] __initdata = { 175static struct i2c_board_info aspenite_i2c_info[] __initdata = {
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 4285dfd80b6f..4ad3969b9881 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -15,6 +15,8 @@ obj-$(CONFIG_MSM_SMD) += smd.o smd_debug.o
15obj-$(CONFIG_MSM_SMD) += last_radio_log.o 15obj-$(CONFIG_MSM_SMD) += last_radio_log.o
16obj-$(CONFIG_MSM_SCM) += scm.o scm-boot.o 16obj-$(CONFIG_MSM_SCM) += scm.o scm-boot.o
17 17
18CFLAGS_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
19
18obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o 20obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
19obj-$(CONFIG_SMP) += headsmp.o platsmp.o 21obj-$(CONFIG_SMP) += headsmp.o platsmp.o
20 22
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index 71de5062c71e..db81ed531031 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -42,8 +42,8 @@
42 42
43extern struct sys_timer msm_timer; 43extern struct sys_timer msm_timer;
44 44
45static void __init msm7x30_fixup(struct machine_desc *desc, struct tag *tag, 45static void __init msm7x30_fixup(struct tag *tag, char **cmdline,
46 char **cmdline, struct meminfo *mi) 46 struct meminfo *mi)
47{ 47{
48 for (; tag->hdr.size; tag = tag_next(tag)) 48 for (; tag->hdr.size; tag = tag_next(tag))
49 if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) { 49 if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) {
diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c
index b04468e7d00e..6dc1cbd2a595 100644
--- a/arch/arm/mach-msm/board-msm8960.c
+++ b/arch/arm/mach-msm/board-msm8960.c
@@ -32,8 +32,8 @@
32 32
33#include "devices.h" 33#include "devices.h"
34 34
35static void __init msm8960_fixup(struct machine_desc *desc, struct tag *tag, 35static void __init msm8960_fixup(struct tag *tag, char **cmdline,
36 char **cmdline, struct meminfo *mi) 36 struct meminfo *mi)
37{ 37{
38 for (; tag->hdr.size; tag = tag_next(tag)) 38 for (; tag->hdr.size; tag = tag_next(tag))
39 if (tag->hdr.tag == ATAG_MEM && 39 if (tag->hdr.tag == ATAG_MEM &&
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index cf38e2284fa9..44bf71688373 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -28,8 +28,8 @@
28#include <mach/board.h> 28#include <mach/board.h>
29#include <mach/msm_iomap.h> 29#include <mach/msm_iomap.h>
30 30
31static void __init msm8x60_fixup(struct machine_desc *desc, struct tag *tag, 31static void __init msm8x60_fixup(struct tag *tag, char **cmdline,
32 char **cmdline, struct meminfo *mi) 32 struct meminfo *mi)
33{ 33{
34 for (; tag->hdr.size; tag = tag_next(tag)) 34 for (; tag->hdr.size; tag = tag_next(tag))
35 if (tag->hdr.tag == ATAG_MEM && 35 if (tag->hdr.tag == ATAG_MEM &&
diff --git a/arch/arm/mach-msm/scm.c b/arch/arm/mach-msm/scm.c
index 232f97a04504..bafabb502580 100644
--- a/arch/arm/mach-msm/scm.c
+++ b/arch/arm/mach-msm/scm.c
@@ -180,6 +180,9 @@ static u32 smc(u32 cmd_addr)
180 __asmeq("%1", "r0") 180 __asmeq("%1", "r0")
181 __asmeq("%2", "r1") 181 __asmeq("%2", "r1")
182 __asmeq("%3", "r2") 182 __asmeq("%3", "r2")
183#ifdef REQUIRES_SEC
184 ".arch_extension sec\n"
185#endif
183 "smc #0 @ switch to secure world\n" 186 "smc #0 @ switch to secure world\n"
184 : "=r" (r0) 187 : "=r" (r0)
185 : "r" (r0), "r" (r1), "r" (r2) 188 : "r" (r0), "r" (r1), "r" (r2)
diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c
index 2aacf41c48e7..4cb276977190 100644
--- a/arch/arm/mach-mx5/clock-mx51-mx53.c
+++ b/arch/arm/mach-mx5/clock-mx51-mx53.c
@@ -1281,9 +1281,9 @@ DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET,
1281 NULL, NULL, &ipg_clk, &gpt_ipg_clk); 1281 NULL, NULL, &ipg_clk, &gpt_ipg_clk);
1282 1282
1283DEFINE_CLOCK(pwm1_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG6_OFFSET, 1283DEFINE_CLOCK(pwm1_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG6_OFFSET,
1284 NULL, NULL, &ipg_clk, NULL); 1284 NULL, NULL, &ipg_perclk, NULL);
1285DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET, 1285DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET,
1286 NULL, NULL, &ipg_clk, NULL); 1286 NULL, NULL, &ipg_perclk, NULL);
1287 1287
1288/* I2C */ 1288/* I2C */
1289DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET, 1289DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET,
@@ -1634,6 +1634,7 @@ int __init mx53_clocks_init(unsigned long ckil, unsigned long osc,
1634 return 0; 1634 return 0;
1635} 1635}
1636 1636
1637#ifdef CONFIG_OF
1637static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc, 1638static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
1638 unsigned long *ckih1, unsigned long *ckih2) 1639 unsigned long *ckih1, unsigned long *ckih2)
1639{ 1640{
@@ -1671,3 +1672,4 @@ int __init mx53_clocks_init_dt(void)
1671 clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2); 1672 clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
1672 return mx53_clocks_init(ckil, osc, ckih1, ckih2); 1673 return mx53_clocks_init(ckil, osc, ckih1, ckih2);
1673} 1674}
1675#endif
diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
index ac2316d53d3c..064ec5abaa55 100644
--- a/arch/arm/mach-mxs/mach-mx28evk.c
+++ b/arch/arm/mach-mxs/mach-mx28evk.c
@@ -471,7 +471,8 @@ static void __init mx28evk_init(void)
471 "mmc0-slot-power"); 471 "mmc0-slot-power");
472 if (ret) 472 if (ret)
473 pr_warn("failed to request gpio mmc0-slot-power: %d\n", ret); 473 pr_warn("failed to request gpio mmc0-slot-power: %d\n", ret);
474 mx28_add_mxs_mmc(0, &mx28evk_mmc_pdata[0]); 474 else
475 mx28_add_mxs_mmc(0, &mx28evk_mmc_pdata[0]);
475 476
476 ret = gpio_request_one(MX28EVK_MMC1_SLOT_POWER, GPIOF_OUT_INIT_LOW, 477 ret = gpio_request_one(MX28EVK_MMC1_SLOT_POWER, GPIOF_OUT_INIT_LOW,
477 "mmc1-slot-power"); 478 "mmc1-slot-power");
@@ -480,7 +481,6 @@ static void __init mx28evk_init(void)
480 else 481 else
481 mx28_add_mxs_mmc(1, &mx28evk_mmc_pdata[1]); 482 mx28_add_mxs_mmc(1, &mx28evk_mmc_pdata[1]);
482 483
483 mx28_add_mxs_mmc(1, &mx28evk_mmc_pdata[1]);
484 mx28_add_rtc_stmp3xxx(); 484 mx28_add_rtc_stmp3xxx();
485 485
486 gpio_led_register_device(0, &mx28evk_led_data); 486 gpio_led_register_device(0, &mx28evk_led_data);
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index c6fe61dfe856..42061573e380 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -42,7 +42,6 @@
42#include <plat/irda.h> 42#include <plat/irda.h>
43#include <plat/keypad.h> 43#include <plat/keypad.h>
44#include <plat/common.h> 44#include <plat/common.h>
45#include <plat/omap-alsa.h>
46 45
47#include <linux/spi/spi.h> 46#include <linux/spi/spi.h>
48#include <linux/spi/ads7846.h> 47#include <linux/spi/ads7846.h>
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 495b3987d461..89ea20ca0ccc 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -116,7 +116,7 @@ void omap1_pm_idle(void)
116 return; 116 return;
117 } 117 }
118 118
119#ifdef CONFIG_OMAP_MPU_TIMER 119#if defined(CONFIG_OMAP_MPU_TIMER) && !defined(CONFIG_OMAP_DM_TIMER)
120#warning Enable 32kHz OS timer in order to allow sleep states in idle 120#warning Enable 32kHz OS timer in order to allow sleep states in idle
121 use_idlect1 = use_idlect1 & ~(1 << 9); 121 use_idlect1 = use_idlect1 & ~(1 << 9);
122#else 122#else
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 42918940c530..90154e411da0 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -226,7 +226,6 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
226{ 226{
227 int ret; 227 int ret;
228 228
229 omap_mux_init_gpio(29, OMAP_PIN_INPUT);
230 /* gpio + 0 is "mmc0_cd" (input/IRQ) */ 229 /* gpio + 0 is "mmc0_cd" (input/IRQ) */
231 mmc[0].gpio_cd = gpio + 0; 230 mmc[0].gpio_cd = gpio + 0;
232 omap2_hsmmc_init(mmc); 231 omap2_hsmmc_init(mmc);
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 0cc9094e5ee0..fb55fa3dad5a 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -28,6 +28,7 @@
28 * XXX: Still needed to boot until the i2c & twl driver is adapted to 28 * XXX: Still needed to boot until the i2c & twl driver is adapted to
29 * device-tree 29 * device-tree
30 */ 30 */
31#ifdef CONFIG_ARCH_OMAP4
31static struct twl4030_platform_data sdp4430_twldata = { 32static struct twl4030_platform_data sdp4430_twldata = {
32 .irq_base = TWL6030_IRQ_BASE, 33 .irq_base = TWL6030_IRQ_BASE,
33 .irq_end = TWL6030_IRQ_END, 34 .irq_end = TWL6030_IRQ_END,
@@ -37,7 +38,9 @@ static void __init omap4_i2c_init(void)
37{ 38{
38 omap4_pmic_init("twl6030", &sdp4430_twldata); 39 omap4_pmic_init("twl6030", &sdp4430_twldata);
39} 40}
41#endif
40 42
43#ifdef CONFIG_ARCH_OMAP3
41static struct twl4030_platform_data beagle_twldata = { 44static struct twl4030_platform_data beagle_twldata = {
42 .irq_base = TWL4030_IRQ_BASE, 45 .irq_base = TWL4030_IRQ_BASE,
43 .irq_end = TWL4030_IRQ_END, 46 .irq_end = TWL4030_IRQ_END,
@@ -47,6 +50,7 @@ static void __init omap3_i2c_init(void)
47{ 50{
48 omap3_pmic_init("twl4030", &beagle_twldata); 51 omap3_pmic_init("twl4030", &beagle_twldata);
49} 52}
53#endif
50 54
51static struct of_device_id omap_dt_match_table[] __initdata = { 55static struct of_device_id omap_dt_match_table[] __initdata = {
52 { .compatible = "simple-bus", }, 56 { .compatible = "simple-bus", },
@@ -72,17 +76,21 @@ static void __init omap_generic_init(void)
72 of_platform_populate(NULL, omap_dt_match_table, NULL, NULL); 76 of_platform_populate(NULL, omap_dt_match_table, NULL, NULL);
73} 77}
74 78
79#ifdef CONFIG_ARCH_OMAP4
75static void __init omap4_init(void) 80static void __init omap4_init(void)
76{ 81{
77 omap4_i2c_init(); 82 omap4_i2c_init();
78 omap_generic_init(); 83 omap_generic_init();
79} 84}
85#endif
80 86
87#ifdef CONFIG_ARCH_OMAP3
81static void __init omap3_init(void) 88static void __init omap3_init(void)
82{ 89{
83 omap3_i2c_init(); 90 omap3_i2c_init();
84 omap_generic_init(); 91 omap_generic_init();
85} 92}
93#endif
86 94
87#if defined(CONFIG_SOC_OMAP2420) 95#if defined(CONFIG_SOC_OMAP2420)
88static const char *omap242x_boards_compat[] __initdata = { 96static const char *omap242x_boards_compat[] __initdata = {
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index c12666ee7017..8b351d92a1cc 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -25,6 +25,7 @@
25#include <linux/err.h> 25#include <linux/err.h>
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/io.h> 27#include <linux/io.h>
28#include <linux/input/matrix_keypad.h>
28 29
29#include <mach/hardware.h> 30#include <mach/hardware.h>
30#include <asm/mach-types.h> 31#include <asm/mach-types.h>
@@ -34,7 +35,6 @@
34#include <plat/usb.h> 35#include <plat/usb.h>
35#include <plat/board.h> 36#include <plat/board.h>
36#include <plat/common.h> 37#include <plat/common.h>
37#include <plat/keypad.h>
38#include <plat/menelaus.h> 38#include <plat/menelaus.h>
39#include <plat/dma.h> 39#include <plat/dma.h>
40#include <plat/gpmc.h> 40#include <plat/gpmc.h>
@@ -50,10 +50,8 @@
50 50
51#define H4_ETHR_GPIO_IRQ 92 51#define H4_ETHR_GPIO_IRQ 92
52 52
53static unsigned int row_gpios[6] = { 88, 89, 124, 11, 6, 96 }; 53#if defined(CONFIG_KEYBOARD_MATRIX) || defined(CONFIG_KEYBOARD_MATRIX_MODULE)
54static unsigned int col_gpios[7] = { 90, 91, 100, 36, 12, 97, 98 }; 54static const uint32_t board_matrix_keys[] = {
55
56static const unsigned int h4_keymap[] = {
57 KEY(0, 0, KEY_LEFT), 55 KEY(0, 0, KEY_LEFT),
58 KEY(1, 0, KEY_RIGHT), 56 KEY(1, 0, KEY_RIGHT),
59 KEY(2, 0, KEY_A), 57 KEY(2, 0, KEY_A),
@@ -86,6 +84,71 @@ static const unsigned int h4_keymap[] = {
86 KEY(4, 5, KEY_ENTER), 84 KEY(4, 5, KEY_ENTER),
87}; 85};
88 86
87static const struct matrix_keymap_data board_keymap_data = {
88 .keymap = board_matrix_keys,
89 .keymap_size = ARRAY_SIZE(board_matrix_keys),
90};
91
92static unsigned int board_keypad_row_gpios[] = {
93 88, 89, 124, 11, 6, 96
94};
95
96static unsigned int board_keypad_col_gpios[] = {
97 90, 91, 100, 36, 12, 97, 98
98};
99
100static struct matrix_keypad_platform_data board_keypad_platform_data = {
101 .keymap_data = &board_keymap_data,
102 .row_gpios = board_keypad_row_gpios,
103 .num_row_gpios = ARRAY_SIZE(board_keypad_row_gpios),
104 .col_gpios = board_keypad_col_gpios,
105 .num_col_gpios = ARRAY_SIZE(board_keypad_col_gpios),
106 .active_low = 1,
107
108 .debounce_ms = 20,
109 .col_scan_delay_us = 5,
110};
111
112static struct platform_device board_keyboard = {
113 .name = "matrix-keypad",
114 .id = -1,
115 .dev = {
116 .platform_data = &board_keypad_platform_data,
117 },
118};
119static void __init board_mkp_init(void)
120{
121 omap_mux_init_gpio(88, OMAP_PULL_ENA | OMAP_PULL_UP);
122 omap_mux_init_gpio(89, OMAP_PULL_ENA | OMAP_PULL_UP);
123 omap_mux_init_gpio(124, OMAP_PULL_ENA | OMAP_PULL_UP);
124 omap_mux_init_signal("mcbsp2_dr.gpio_11", OMAP_PULL_ENA | OMAP_PULL_UP);
125 if (omap_has_menelaus()) {
126 omap_mux_init_signal("sdrc_a14.gpio0",
127 OMAP_PULL_ENA | OMAP_PULL_UP);
128 omap_mux_init_signal("vlynq_rx0.gpio_15", 0);
129 omap_mux_init_signal("gpio_98", 0);
130 board_keypad_row_gpios[5] = 0;
131 board_keypad_col_gpios[2] = 15;
132 board_keypad_col_gpios[6] = 18;
133 } else {
134 omap_mux_init_signal("gpio_96", OMAP_PULL_ENA | OMAP_PULL_UP);
135 omap_mux_init_signal("gpio_100", 0);
136 omap_mux_init_signal("gpio_98", 0);
137 }
138 omap_mux_init_signal("gpio_90", 0);
139 omap_mux_init_signal("gpio_91", 0);
140 omap_mux_init_signal("gpio_36", 0);
141 omap_mux_init_signal("mcbsp2_clkx.gpio_12", 0);
142 omap_mux_init_signal("gpio_97", 0);
143
144 platform_device_register(&board_keyboard);
145}
146#else
147static inline void board_mkp_init(void)
148{
149}
150#endif
151
89static struct mtd_partition h4_partitions[] = { 152static struct mtd_partition h4_partitions[] = {
90 /* bootloader (U-Boot, etc) in first sector */ 153 /* bootloader (U-Boot, etc) in first sector */
91 { 154 {
@@ -137,31 +200,8 @@ static struct platform_device h4_flash_device = {
137 .resource = &h4_flash_resource, 200 .resource = &h4_flash_resource,
138}; 201};
139 202
140static const struct matrix_keymap_data h4_keymap_data = {
141 .keymap = h4_keymap,
142 .keymap_size = ARRAY_SIZE(h4_keymap),
143};
144
145static struct omap_kp_platform_data h4_kp_data = {
146 .rows = 6,
147 .cols = 7,
148 .keymap_data = &h4_keymap_data,
149 .rep = true,
150 .row_gpios = row_gpios,
151 .col_gpios = col_gpios,
152};
153
154static struct platform_device h4_kp_device = {
155 .name = "omap-keypad",
156 .id = -1,
157 .dev = {
158 .platform_data = &h4_kp_data,
159 },
160};
161
162static struct platform_device *h4_devices[] __initdata = { 203static struct platform_device *h4_devices[] __initdata = {
163 &h4_flash_device, 204 &h4_flash_device,
164 &h4_kp_device,
165}; 205};
166 206
167static struct panel_generic_dpi_data h4_panel_data = { 207static struct panel_generic_dpi_data h4_panel_data = {
@@ -336,31 +376,7 @@ static void __init omap_h4_init(void)
336 * if not needed. 376 * if not needed.
337 */ 377 */
338 378
339#if defined(CONFIG_KEYBOARD_OMAP) || defined(CONFIG_KEYBOARD_OMAP_MODULE) 379 board_mkp_init();
340 omap_mux_init_gpio(88, OMAP_PULL_ENA | OMAP_PULL_UP);
341 omap_mux_init_gpio(89, OMAP_PULL_ENA | OMAP_PULL_UP);
342 omap_mux_init_gpio(124, OMAP_PULL_ENA | OMAP_PULL_UP);
343 omap_mux_init_signal("mcbsp2_dr.gpio_11", OMAP_PULL_ENA | OMAP_PULL_UP);
344 if (omap_has_menelaus()) {
345 omap_mux_init_signal("sdrc_a14.gpio0",
346 OMAP_PULL_ENA | OMAP_PULL_UP);
347 omap_mux_init_signal("vlynq_rx0.gpio_15", 0);
348 omap_mux_init_signal("gpio_98", 0);
349 row_gpios[5] = 0;
350 col_gpios[2] = 15;
351 col_gpios[6] = 18;
352 } else {
353 omap_mux_init_signal("gpio_96", OMAP_PULL_ENA | OMAP_PULL_UP);
354 omap_mux_init_signal("gpio_100", 0);
355 omap_mux_init_signal("gpio_98", 0);
356 }
357 omap_mux_init_signal("gpio_90", 0);
358 omap_mux_init_signal("gpio_91", 0);
359 omap_mux_init_signal("gpio_36", 0);
360 omap_mux_init_signal("mcbsp2_clkx.gpio_12", 0);
361 omap_mux_init_signal("gpio_97", 0);
362#endif
363
364 i2c_register_board_info(1, h4_i2c_board_info, 380 i2c_register_board_info(1, h4_i2c_board_info,
365 ARRAY_SIZE(h4_i2c_board_info)); 381 ARRAY_SIZE(h4_i2c_board_info));
366 382
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
index bcffee001bfa..e069a9be93df 100644
--- a/arch/arm/mach-omap2/clkt_dpll.c
+++ b/arch/arm/mach-omap2/clkt_dpll.c
@@ -46,10 +46,19 @@
46 (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE)) 46 (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE))
47 47
48/* DPLL valid Fint frequency band limits - from 34xx TRM Section 4.7.6.2 */ 48/* DPLL valid Fint frequency band limits - from 34xx TRM Section 4.7.6.2 */
49#define DPLL_FINT_BAND1_MIN 750000 49#define OMAP3430_DPLL_FINT_BAND1_MIN 750000
50#define DPLL_FINT_BAND1_MAX 2100000 50#define OMAP3430_DPLL_FINT_BAND1_MAX 2100000
51#define DPLL_FINT_BAND2_MIN 7500000 51#define OMAP3430_DPLL_FINT_BAND2_MIN 7500000
52#define DPLL_FINT_BAND2_MAX 21000000 52#define OMAP3430_DPLL_FINT_BAND2_MAX 21000000
53
54/*
55 * DPLL valid Fint frequency range for OMAP36xx and OMAP4xxx.
56 * From device data manual section 4.3 "DPLL and DLL Specifications".
57 */
58#define OMAP3PLUS_DPLL_FINT_JTYPE_MIN 500000
59#define OMAP3PLUS_DPLL_FINT_JTYPE_MAX 2500000
60#define OMAP3PLUS_DPLL_FINT_MIN 32000
61#define OMAP3PLUS_DPLL_FINT_MAX 52000000
53 62
54/* _dpll_test_fint() return codes */ 63/* _dpll_test_fint() return codes */
55#define DPLL_FINT_UNDERFLOW -1 64#define DPLL_FINT_UNDERFLOW -1
@@ -71,33 +80,43 @@
71static int _dpll_test_fint(struct clk *clk, u8 n) 80static int _dpll_test_fint(struct clk *clk, u8 n)
72{ 81{
73 struct dpll_data *dd; 82 struct dpll_data *dd;
74 long fint; 83 long fint, fint_min, fint_max;
75 int ret = 0; 84 int ret = 0;
76 85
77 dd = clk->dpll_data; 86 dd = clk->dpll_data;
78 87
79 /* DPLL divider must result in a valid jitter correction val */ 88 /* DPLL divider must result in a valid jitter correction val */
80 fint = clk->parent->rate / n; 89 fint = clk->parent->rate / n;
81 if (fint < DPLL_FINT_BAND1_MIN) {
82 90
91 if (cpu_is_omap24xx()) {
92 /* Should not be called for OMAP2, so warn if it is called */
93 WARN(1, "No fint limits available for OMAP2!\n");
94 return DPLL_FINT_INVALID;
95 } else if (cpu_is_omap3430()) {
96 fint_min = OMAP3430_DPLL_FINT_BAND1_MIN;
97 fint_max = OMAP3430_DPLL_FINT_BAND2_MAX;
98 } else if (dd->flags & DPLL_J_TYPE) {
99 fint_min = OMAP3PLUS_DPLL_FINT_JTYPE_MIN;
100 fint_max = OMAP3PLUS_DPLL_FINT_JTYPE_MAX;
101 } else {
102 fint_min = OMAP3PLUS_DPLL_FINT_MIN;
103 fint_max = OMAP3PLUS_DPLL_FINT_MAX;
104 }
105
106 if (fint < fint_min) {
83 pr_debug("rejecting n=%d due to Fint failure, " 107 pr_debug("rejecting n=%d due to Fint failure, "
84 "lowering max_divider\n", n); 108 "lowering max_divider\n", n);
85 dd->max_divider = n; 109 dd->max_divider = n;
86 ret = DPLL_FINT_UNDERFLOW; 110 ret = DPLL_FINT_UNDERFLOW;
87 111 } else if (fint > fint_max) {
88 } else if (fint > DPLL_FINT_BAND1_MAX &&
89 fint < DPLL_FINT_BAND2_MIN) {
90
91 pr_debug("rejecting n=%d due to Fint failure\n", n);
92 ret = DPLL_FINT_INVALID;
93
94 } else if (fint > DPLL_FINT_BAND2_MAX) {
95
96 pr_debug("rejecting n=%d due to Fint failure, " 112 pr_debug("rejecting n=%d due to Fint failure, "
97 "boosting min_divider\n", n); 113 "boosting min_divider\n", n);
98 dd->min_divider = n; 114 dd->min_divider = n;
99 ret = DPLL_FINT_INVALID; 115 ret = DPLL_FINT_INVALID;
100 116 } else if (cpu_is_omap3430() && fint > OMAP3430_DPLL_FINT_BAND1_MAX &&
117 fint < OMAP3430_DPLL_FINT_BAND2_MIN) {
118 pr_debug("rejecting n=%d due to Fint failure\n", n);
119 ret = DPLL_FINT_INVALID;
101 } 120 }
102 121
103 return ret; 122 return ret;
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index 48ac568881bd..2311bc217226 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -66,6 +66,8 @@ void omap3_noncore_dpll_disable(struct clk *clk);
66int omap4_dpllmx_gatectrl_read(struct clk *clk); 66int omap4_dpllmx_gatectrl_read(struct clk *clk);
67void omap4_dpllmx_allow_gatectrl(struct clk *clk); 67void omap4_dpllmx_allow_gatectrl(struct clk *clk);
68void omap4_dpllmx_deny_gatectrl(struct clk *clk); 68void omap4_dpllmx_deny_gatectrl(struct clk *clk);
69long omap4_dpll_regm4xen_round_rate(struct clk *clk, unsigned long target_rate);
70unsigned long omap4_dpll_regm4xen_recalc(struct clk *clk);
69 71
70#ifdef CONFIG_OMAP_RESET_CLOCKS 72#ifdef CONFIG_OMAP_RESET_CLOCKS
71void omap2_clk_disable_unused(struct clk *clk); 73void omap2_clk_disable_unused(struct clk *clk);
diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
index 14a6277dd184..61ad3855f10a 100644
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ b/arch/arm/mach-omap2/clock2420_data.c
@@ -1898,18 +1898,6 @@ static struct omap_clk omap2420_clks[] = {
1898 CLK(NULL, "pka_ick", &pka_ick, CK_242X), 1898 CLK(NULL, "pka_ick", &pka_ick, CK_242X),
1899 CLK(NULL, "usb_fck", &usb_fck, CK_242X), 1899 CLK(NULL, "usb_fck", &usb_fck, CK_242X),
1900 CLK("musb-hdrc", "fck", &osc_ck, CK_242X), 1900 CLK("musb-hdrc", "fck", &osc_ck, CK_242X),
1901 CLK("omap_timer.1", "fck", &gpt1_fck, CK_242X),
1902 CLK("omap_timer.2", "fck", &gpt2_fck, CK_242X),
1903 CLK("omap_timer.3", "fck", &gpt3_fck, CK_242X),
1904 CLK("omap_timer.4", "fck", &gpt4_fck, CK_242X),
1905 CLK("omap_timer.5", "fck", &gpt5_fck, CK_242X),
1906 CLK("omap_timer.6", "fck", &gpt6_fck, CK_242X),
1907 CLK("omap_timer.7", "fck", &gpt7_fck, CK_242X),
1908 CLK("omap_timer.8", "fck", &gpt8_fck, CK_242X),
1909 CLK("omap_timer.9", "fck", &gpt9_fck, CK_242X),
1910 CLK("omap_timer.10", "fck", &gpt10_fck, CK_242X),
1911 CLK("omap_timer.11", "fck", &gpt11_fck, CK_242X),
1912 CLK("omap_timer.12", "fck", &gpt12_fck, CK_242X),
1913 CLK("omap_timer.1", "32k_ck", &func_32k_ck, CK_243X), 1901 CLK("omap_timer.1", "32k_ck", &func_32k_ck, CK_243X),
1914 CLK("omap_timer.2", "32k_ck", &func_32k_ck, CK_243X), 1902 CLK("omap_timer.2", "32k_ck", &func_32k_ck, CK_243X),
1915 CLK("omap_timer.3", "32k_ck", &func_32k_ck, CK_243X), 1903 CLK("omap_timer.3", "32k_ck", &func_32k_ck, CK_243X),
diff --git a/arch/arm/mach-omap2/clock2430_data.c b/arch/arm/mach-omap2/clock2430_data.c
index ea6717cfa3c8..0cc12879e7b9 100644
--- a/arch/arm/mach-omap2/clock2430_data.c
+++ b/arch/arm/mach-omap2/clock2430_data.c
@@ -1998,18 +1998,6 @@ static struct omap_clk omap2430_clks[] = {
1998 CLK(NULL, "mdm_intc_ick", &mdm_intc_ick, CK_243X), 1998 CLK(NULL, "mdm_intc_ick", &mdm_intc_ick, CK_243X),
1999 CLK("omap_hsmmc.0", "mmchsdb_fck", &mmchsdb1_fck, CK_243X), 1999 CLK("omap_hsmmc.0", "mmchsdb_fck", &mmchsdb1_fck, CK_243X),
2000 CLK("omap_hsmmc.1", "mmchsdb_fck", &mmchsdb2_fck, CK_243X), 2000 CLK("omap_hsmmc.1", "mmchsdb_fck", &mmchsdb2_fck, CK_243X),
2001 CLK("omap_timer.1", "fck", &gpt1_fck, CK_243X),
2002 CLK("omap_timer.2", "fck", &gpt2_fck, CK_243X),
2003 CLK("omap_timer.3", "fck", &gpt3_fck, CK_243X),
2004 CLK("omap_timer.4", "fck", &gpt4_fck, CK_243X),
2005 CLK("omap_timer.5", "fck", &gpt5_fck, CK_243X),
2006 CLK("omap_timer.6", "fck", &gpt6_fck, CK_243X),
2007 CLK("omap_timer.7", "fck", &gpt7_fck, CK_243X),
2008 CLK("omap_timer.8", "fck", &gpt8_fck, CK_243X),
2009 CLK("omap_timer.9", "fck", &gpt9_fck, CK_243X),
2010 CLK("omap_timer.10", "fck", &gpt10_fck, CK_243X),
2011 CLK("omap_timer.11", "fck", &gpt11_fck, CK_243X),
2012 CLK("omap_timer.12", "fck", &gpt12_fck, CK_243X),
2013 CLK("omap_timer.1", "32k_ck", &func_32k_ck, CK_243X), 2001 CLK("omap_timer.1", "32k_ck", &func_32k_ck, CK_243X),
2014 CLK("omap_timer.2", "32k_ck", &func_32k_ck, CK_243X), 2002 CLK("omap_timer.2", "32k_ck", &func_32k_ck, CK_243X),
2015 CLK("omap_timer.3", "32k_ck", &func_32k_ck, CK_243X), 2003 CLK("omap_timer.3", "32k_ck", &func_32k_ck, CK_243X),
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 65dd363163bc..5d0064a4fb5a 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3464,18 +3464,6 @@ static struct omap_clk omap3xxx_clks[] = {
3464 CLK("musb-am35x", "fck", &hsotgusb_fck_am35xx, CK_AM35XX), 3464 CLK("musb-am35x", "fck", &hsotgusb_fck_am35xx, CK_AM35XX),
3465 CLK(NULL, "hecc_ck", &hecc_ck, CK_AM35XX), 3465 CLK(NULL, "hecc_ck", &hecc_ck, CK_AM35XX),
3466 CLK(NULL, "uart4_ick", &uart4_ick_am35xx, CK_AM35XX), 3466 CLK(NULL, "uart4_ick", &uart4_ick_am35xx, CK_AM35XX),
3467 CLK("omap_timer.1", "fck", &gpt1_fck, CK_3XXX),
3468 CLK("omap_timer.2", "fck", &gpt2_fck, CK_3XXX),
3469 CLK("omap_timer.3", "fck", &gpt3_fck, CK_3XXX),
3470 CLK("omap_timer.4", "fck", &gpt4_fck, CK_3XXX),
3471 CLK("omap_timer.5", "fck", &gpt5_fck, CK_3XXX),
3472 CLK("omap_timer.6", "fck", &gpt6_fck, CK_3XXX),
3473 CLK("omap_timer.7", "fck", &gpt7_fck, CK_3XXX),
3474 CLK("omap_timer.8", "fck", &gpt8_fck, CK_3XXX),
3475 CLK("omap_timer.9", "fck", &gpt9_fck, CK_3XXX),
3476 CLK("omap_timer.10", "fck", &gpt10_fck, CK_3XXX),
3477 CLK("omap_timer.11", "fck", &gpt11_fck, CK_3XXX),
3478 CLK("omap_timer.12", "fck", &gpt12_fck, CK_3XXX),
3479 CLK("omap_timer.1", "32k_ck", &omap_32k_fck, CK_3XXX), 3467 CLK("omap_timer.1", "32k_ck", &omap_32k_fck, CK_3XXX),
3480 CLK("omap_timer.2", "32k_ck", &omap_32k_fck, CK_3XXX), 3468 CLK("omap_timer.2", "32k_ck", &omap_32k_fck, CK_3XXX),
3481 CLK("omap_timer.3", "32k_ck", &omap_32k_fck, CK_3XXX), 3469 CLK("omap_timer.3", "32k_ck", &omap_32k_fck, CK_3XXX),
diff --git a/arch/arm/mach-omap2/clock44xx.h b/arch/arm/mach-omap2/clock44xx.h
index 7ceb870e7ab8..287a46f78d97 100644
--- a/arch/arm/mach-omap2/clock44xx.h
+++ b/arch/arm/mach-omap2/clock44xx.h
@@ -8,6 +8,13 @@
8#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK44XX_H 8#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK44XX_H
9#define __ARCH_ARM_MACH_OMAP2_CLOCK44XX_H 9#define __ARCH_ARM_MACH_OMAP2_CLOCK44XX_H
10 10
11/*
12 * OMAP4430_REGM4XEN_MULT: If the CM_CLKMODE_DPLL_ABE.DPLL_REGM4XEN bit is
13 * set, then the DPLL's lock frequency is multiplied by 4 (OMAP4430 TRM
14 * vV Section 3.6.3.3.1 "DPLLs Output Clocks Parameters")
15 */
16#define OMAP4430_REGM4XEN_MULT 4
17
11int omap4xxx_clk_init(void); 18int omap4xxx_clk_init(void);
12 19
13#endif 20#endif
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 946bf04a956d..0798a802497a 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -270,8 +270,8 @@ static struct clk dpll_abe_ck = {
270 .dpll_data = &dpll_abe_dd, 270 .dpll_data = &dpll_abe_dd,
271 .init = &omap2_init_dpll_parent, 271 .init = &omap2_init_dpll_parent,
272 .ops = &clkops_omap3_noncore_dpll_ops, 272 .ops = &clkops_omap3_noncore_dpll_ops,
273 .recalc = &omap3_dpll_recalc, 273 .recalc = &omap4_dpll_regm4xen_recalc,
274 .round_rate = &omap2_dpll_round_rate, 274 .round_rate = &omap4_dpll_regm4xen_round_rate,
275 .set_rate = &omap3_noncore_dpll_set_rate, 275 .set_rate = &omap3_noncore_dpll_set_rate,
276}; 276};
277 277
@@ -1195,11 +1195,25 @@ static struct clk l4_wkup_clk_mux_ck = {
1195 .recalc = &omap2_clksel_recalc, 1195 .recalc = &omap2_clksel_recalc,
1196}; 1196};
1197 1197
1198static const struct clksel_rate div2_2to1_rates[] = {
1199 { .div = 1, .val = 1, .flags = RATE_IN_4430 },
1200 { .div = 2, .val = 0, .flags = RATE_IN_4430 },
1201 { .div = 0 },
1202};
1203
1204static const struct clksel ocp_abe_iclk_div[] = {
1205 { .parent = &aess_fclk, .rates = div2_2to1_rates },
1206 { .parent = NULL },
1207};
1208
1198static struct clk ocp_abe_iclk = { 1209static struct clk ocp_abe_iclk = {
1199 .name = "ocp_abe_iclk", 1210 .name = "ocp_abe_iclk",
1200 .parent = &aess_fclk, 1211 .parent = &aess_fclk,
1212 .clksel = ocp_abe_iclk_div,
1213 .clksel_reg = OMAP4430_CM1_ABE_AESS_CLKCTRL,
1214 .clksel_mask = OMAP4430_CLKSEL_AESS_FCLK_MASK,
1201 .ops = &clkops_null, 1215 .ops = &clkops_null,
1202 .recalc = &followparent_recalc, 1216 .recalc = &omap2_clksel_recalc,
1203}; 1217};
1204 1218
1205static struct clk per_abe_24m_fclk = { 1219static struct clk per_abe_24m_fclk = {
@@ -1398,9 +1412,9 @@ static struct clk dss_dss_clk = {
1398}; 1412};
1399 1413
1400static const struct clksel_rate div3_8to32_rates[] = { 1414static const struct clksel_rate div3_8to32_rates[] = {
1401 { .div = 8, .val = 0, .flags = RATE_IN_44XX }, 1415 { .div = 8, .val = 0, .flags = RATE_IN_4460 },
1402 { .div = 16, .val = 1, .flags = RATE_IN_44XX }, 1416 { .div = 16, .val = 1, .flags = RATE_IN_4460 },
1403 { .div = 32, .val = 2, .flags = RATE_IN_44XX }, 1417 { .div = 32, .val = 2, .flags = RATE_IN_4460 },
1404 { .div = 0 }, 1418 { .div = 0 },
1405}; 1419};
1406 1420
@@ -3363,17 +3377,6 @@ static struct omap_clk omap44xx_clks[] = {
3363 CLK("usbhs-omap.0", "usbhost_ick", &dummy_ck, CK_443X), 3377 CLK("usbhs-omap.0", "usbhost_ick", &dummy_ck, CK_443X),
3364 CLK("usbhs-omap.0", "usbtll_fck", &dummy_ck, CK_443X), 3378 CLK("usbhs-omap.0", "usbtll_fck", &dummy_ck, CK_443X),
3365 CLK("omap_wdt", "ick", &dummy_ck, CK_443X), 3379 CLK("omap_wdt", "ick", &dummy_ck, CK_443X),
3366 CLK("omap_timer.1", "fck", &timer1_fck, CK_443X),
3367 CLK("omap_timer.2", "fck", &timer2_fck, CK_443X),
3368 CLK("omap_timer.3", "fck", &timer3_fck, CK_443X),
3369 CLK("omap_timer.4", "fck", &timer4_fck, CK_443X),
3370 CLK("omap_timer.5", "fck", &timer5_fck, CK_443X),
3371 CLK("omap_timer.6", "fck", &timer6_fck, CK_443X),
3372 CLK("omap_timer.7", "fck", &timer7_fck, CK_443X),
3373 CLK("omap_timer.8", "fck", &timer8_fck, CK_443X),
3374 CLK("omap_timer.9", "fck", &timer9_fck, CK_443X),
3375 CLK("omap_timer.10", "fck", &timer10_fck, CK_443X),
3376 CLK("omap_timer.11", "fck", &timer11_fck, CK_443X),
3377 CLK("omap_timer.1", "32k_ck", &sys_32k_ck, CK_443X), 3380 CLK("omap_timer.1", "32k_ck", &sys_32k_ck, CK_443X),
3378 CLK("omap_timer.2", "32k_ck", &sys_32k_ck, CK_443X), 3381 CLK("omap_timer.2", "32k_ck", &sys_32k_ck, CK_443X),
3379 CLK("omap_timer.3", "32k_ck", &sys_32k_ck, CK_443X), 3382 CLK("omap_timer.3", "32k_ck", &sys_32k_ck, CK_443X),
@@ -3403,12 +3406,12 @@ int __init omap4xxx_clk_init(void)
3403 struct omap_clk *c; 3406 struct omap_clk *c;
3404 u32 cpu_clkflg; 3407 u32 cpu_clkflg;
3405 3408
3406 if (cpu_is_omap44xx()) { 3409 if (cpu_is_omap443x()) {
3407 cpu_mask = RATE_IN_4430; 3410 cpu_mask = RATE_IN_4430;
3408 cpu_clkflg = CK_443X; 3411 cpu_clkflg = CK_443X;
3409 } else if (cpu_is_omap446x()) { 3412 } else if (cpu_is_omap446x()) {
3410 cpu_mask = RATE_IN_4460; 3413 cpu_mask = RATE_IN_4460 | RATE_IN_4430;
3411 cpu_clkflg = CK_446X; 3414 cpu_clkflg = CK_446X | CK_443X;
3412 } else { 3415 } else {
3413 return 0; 3416 return 0;
3414 } 3417 }
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 4bf6e6e8b100..1fe35c24fba2 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -88,17 +88,21 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
88/** 88/**
89 * omap3_enter_idle - Programs OMAP3 to enter the specified state 89 * omap3_enter_idle - Programs OMAP3 to enter the specified state
90 * @dev: cpuidle device 90 * @dev: cpuidle device
91 * @state: The target state to be programmed 91 * @drv: cpuidle driver
92 * @index: the index of state to be entered
92 * 93 *
93 * Called from the CPUidle framework to program the device to the 94 * Called from the CPUidle framework to program the device to the
94 * specified target state selected by the governor. 95 * specified target state selected by the governor.
95 */ 96 */
96static int omap3_enter_idle(struct cpuidle_device *dev, 97static int omap3_enter_idle(struct cpuidle_device *dev,
97 struct cpuidle_state *state) 98 struct cpuidle_driver *drv,
99 int index)
98{ 100{
99 struct omap3_idle_statedata *cx = cpuidle_get_statedata(state); 101 struct omap3_idle_statedata *cx =
102 cpuidle_get_statedata(&dev->states_usage[index]);
100 struct timespec ts_preidle, ts_postidle, ts_idle; 103 struct timespec ts_preidle, ts_postidle, ts_idle;
101 u32 mpu_state = cx->mpu_state, core_state = cx->core_state; 104 u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
105 int idle_time;
102 106
103 /* Used to keep track of the total time in idle */ 107 /* Used to keep track of the total time in idle */
104 getnstimeofday(&ts_preidle); 108 getnstimeofday(&ts_preidle);
@@ -113,7 +117,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
113 goto return_sleep_time; 117 goto return_sleep_time;
114 118
115 /* Deny idle for C1 */ 119 /* Deny idle for C1 */
116 if (state == &dev->states[0]) { 120 if (index == 0) {
117 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle); 121 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
118 pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); 122 pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
119 } 123 }
@@ -122,7 +126,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
122 omap_sram_idle(); 126 omap_sram_idle();
123 127
124 /* Re-allow idle for C1 */ 128 /* Re-allow idle for C1 */
125 if (state == &dev->states[0]) { 129 if (index == 0) {
126 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle); 130 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
127 pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle); 131 pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
128 } 132 }
@@ -134,28 +138,38 @@ return_sleep_time:
134 local_irq_enable(); 138 local_irq_enable();
135 local_fiq_enable(); 139 local_fiq_enable();
136 140
137 return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC; 141 idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
142 USEC_PER_SEC;
143
144 /* Update cpuidle counters */
145 dev->last_residency = idle_time;
146
147 return index;
138} 148}
139 149
140/** 150/**
141 * next_valid_state - Find next valid C-state 151 * next_valid_state - Find next valid C-state
142 * @dev: cpuidle device 152 * @dev: cpuidle device
143 * @state: Currently selected C-state 153 * @drv: cpuidle driver
154 * @index: Index of currently selected c-state
144 * 155 *
145 * If the current state is valid, it is returned back to the caller. 156 * If the state corresponding to index is valid, index is returned back
146 * Else, this function searches for a lower c-state which is still 157 * to the caller. Else, this function searches for a lower c-state which is
147 * valid. 158 * still valid (as defined in omap3_power_states[]) and returns its index.
148 * 159 *
149 * A state is valid if the 'valid' field is enabled and 160 * A state is valid if the 'valid' field is enabled and
150 * if it satisfies the enable_off_mode condition. 161 * if it satisfies the enable_off_mode condition.
151 */ 162 */
152static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, 163static int next_valid_state(struct cpuidle_device *dev,
153 struct cpuidle_state *curr) 164 struct cpuidle_driver *drv,
165 int index)
154{ 166{
155 struct cpuidle_state *next = NULL; 167 struct cpuidle_state_usage *curr_usage = &dev->states_usage[index];
156 struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr); 168 struct cpuidle_state *curr = &drv->states[index];
169 struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr_usage);
157 u32 mpu_deepest_state = PWRDM_POWER_RET; 170 u32 mpu_deepest_state = PWRDM_POWER_RET;
158 u32 core_deepest_state = PWRDM_POWER_RET; 171 u32 core_deepest_state = PWRDM_POWER_RET;
172 int next_index = -1;
159 173
160 if (enable_off_mode) { 174 if (enable_off_mode) {
161 mpu_deepest_state = PWRDM_POWER_OFF; 175 mpu_deepest_state = PWRDM_POWER_OFF;
@@ -172,20 +186,20 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
172 if ((cx->valid) && 186 if ((cx->valid) &&
173 (cx->mpu_state >= mpu_deepest_state) && 187 (cx->mpu_state >= mpu_deepest_state) &&
174 (cx->core_state >= core_deepest_state)) { 188 (cx->core_state >= core_deepest_state)) {
175 return curr; 189 return index;
176 } else { 190 } else {
177 int idx = OMAP3_NUM_STATES - 1; 191 int idx = OMAP3_NUM_STATES - 1;
178 192
179 /* Reach the current state starting at highest C-state */ 193 /* Reach the current state starting at highest C-state */
180 for (; idx >= 0; idx--) { 194 for (; idx >= 0; idx--) {
181 if (&dev->states[idx] == curr) { 195 if (&drv->states[idx] == curr) {
182 next = &dev->states[idx]; 196 next_index = idx;
183 break; 197 break;
184 } 198 }
185 } 199 }
186 200
187 /* Should never hit this condition */ 201 /* Should never hit this condition */
188 WARN_ON(next == NULL); 202 WARN_ON(next_index == -1);
189 203
190 /* 204 /*
191 * Drop to next valid state. 205 * Drop to next valid state.
@@ -193,41 +207,44 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
193 */ 207 */
194 idx--; 208 idx--;
195 for (; idx >= 0; idx--) { 209 for (; idx >= 0; idx--) {
196 cx = cpuidle_get_statedata(&dev->states[idx]); 210 cx = cpuidle_get_statedata(&dev->states_usage[idx]);
197 if ((cx->valid) && 211 if ((cx->valid) &&
198 (cx->mpu_state >= mpu_deepest_state) && 212 (cx->mpu_state >= mpu_deepest_state) &&
199 (cx->core_state >= core_deepest_state)) { 213 (cx->core_state >= core_deepest_state)) {
200 next = &dev->states[idx]; 214 next_index = idx;
201 break; 215 break;
202 } 216 }
203 } 217 }
204 /* 218 /*
205 * C1 is always valid. 219 * C1 is always valid.
206 * So, no need to check for 'next==NULL' outside this loop. 220 * So, no need to check for 'next_index == -1' outside
221 * this loop.
207 */ 222 */
208 } 223 }
209 224
210 return next; 225 return next_index;
211} 226}
212 227
213/** 228/**
214 * omap3_enter_idle_bm - Checks for any bus activity 229 * omap3_enter_idle_bm - Checks for any bus activity
215 * @dev: cpuidle device 230 * @dev: cpuidle device
216 * @state: The target state to be programmed 231 * @drv: cpuidle driver
232 * @index: array index of target state to be programmed
217 * 233 *
218 * This function checks for any pending activity and then programs 234 * This function checks for any pending activity and then programs
219 * the device to the specified or a safer state. 235 * the device to the specified or a safer state.
220 */ 236 */
221static int omap3_enter_idle_bm(struct cpuidle_device *dev, 237static int omap3_enter_idle_bm(struct cpuidle_device *dev,
222 struct cpuidle_state *state) 238 struct cpuidle_driver *drv,
239 int index)
223{ 240{
224 struct cpuidle_state *new_state; 241 int new_state_idx;
225 u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state; 242 u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state;
226 struct omap3_idle_statedata *cx; 243 struct omap3_idle_statedata *cx;
227 int ret; 244 int ret;
228 245
229 if (!omap3_can_sleep()) { 246 if (!omap3_can_sleep()) {
230 new_state = dev->safe_state; 247 new_state_idx = drv->safe_state_index;
231 goto select_state; 248 goto select_state;
232 } 249 }
233 250
@@ -237,7 +254,7 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
237 */ 254 */
238 cam_state = pwrdm_read_pwrst(cam_pd); 255 cam_state = pwrdm_read_pwrst(cam_pd);
239 if (cam_state == PWRDM_POWER_ON) { 256 if (cam_state == PWRDM_POWER_ON) {
240 new_state = dev->safe_state; 257 new_state_idx = drv->safe_state_index;
241 goto select_state; 258 goto select_state;
242 } 259 }
243 260
@@ -253,7 +270,7 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
253 * Prevent PER off if CORE is not in retention or off as this 270 * Prevent PER off if CORE is not in retention or off as this
254 * would disable PER wakeups completely. 271 * would disable PER wakeups completely.
255 */ 272 */
256 cx = cpuidle_get_statedata(state); 273 cx = cpuidle_get_statedata(&dev->states_usage[index]);
257 core_next_state = cx->core_state; 274 core_next_state = cx->core_state;
258 per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd); 275 per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
259 if ((per_next_state == PWRDM_POWER_OFF) && 276 if ((per_next_state == PWRDM_POWER_OFF) &&
@@ -264,11 +281,10 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
264 if (per_next_state != per_saved_state) 281 if (per_next_state != per_saved_state)
265 pwrdm_set_next_pwrst(per_pd, per_next_state); 282 pwrdm_set_next_pwrst(per_pd, per_next_state);
266 283
267 new_state = next_valid_state(dev, state); 284 new_state_idx = next_valid_state(dev, drv, index);
268 285
269select_state: 286select_state:
270 dev->last_state = new_state; 287 ret = omap3_enter_idle(dev, drv, new_state_idx);
271 ret = omap3_enter_idle(dev, new_state);
272 288
273 /* Restore original PER state if it was modified */ 289 /* Restore original PER state if it was modified */
274 if (per_next_state != per_saved_state) 290 if (per_next_state != per_saved_state)
@@ -301,22 +317,31 @@ struct cpuidle_driver omap3_idle_driver = {
301 .owner = THIS_MODULE, 317 .owner = THIS_MODULE,
302}; 318};
303 319
304/* Helper to fill the C-state common data and register the driver_data */ 320/* Helper to fill the C-state common data*/
305static inline struct omap3_idle_statedata *_fill_cstate( 321static inline void _fill_cstate(struct cpuidle_driver *drv,
306 struct cpuidle_device *dev,
307 int idx, const char *descr) 322 int idx, const char *descr)
308{ 323{
309 struct omap3_idle_statedata *cx = &omap3_idle_data[idx]; 324 struct cpuidle_state *state = &drv->states[idx];
310 struct cpuidle_state *state = &dev->states[idx];
311 325
312 state->exit_latency = cpuidle_params_table[idx].exit_latency; 326 state->exit_latency = cpuidle_params_table[idx].exit_latency;
313 state->target_residency = cpuidle_params_table[idx].target_residency; 327 state->target_residency = cpuidle_params_table[idx].target_residency;
314 state->flags = CPUIDLE_FLAG_TIME_VALID; 328 state->flags = CPUIDLE_FLAG_TIME_VALID;
315 state->enter = omap3_enter_idle_bm; 329 state->enter = omap3_enter_idle_bm;
316 cx->valid = cpuidle_params_table[idx].valid;
317 sprintf(state->name, "C%d", idx + 1); 330 sprintf(state->name, "C%d", idx + 1);
318 strncpy(state->desc, descr, CPUIDLE_DESC_LEN); 331 strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
319 cpuidle_set_statedata(state, cx); 332
333}
334
335/* Helper to register the driver_data */
336static inline struct omap3_idle_statedata *_fill_cstate_usage(
337 struct cpuidle_device *dev,
338 int idx)
339{
340 struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
341 struct cpuidle_state_usage *state_usage = &dev->states_usage[idx];
342
343 cx->valid = cpuidle_params_table[idx].valid;
344 cpuidle_set_statedata(state_usage, cx);
320 345
321 return cx; 346 return cx;
322} 347}
@@ -330,6 +355,7 @@ static inline struct omap3_idle_statedata *_fill_cstate(
330int __init omap3_idle_init(void) 355int __init omap3_idle_init(void)
331{ 356{
332 struct cpuidle_device *dev; 357 struct cpuidle_device *dev;
358 struct cpuidle_driver *drv = &omap3_idle_driver;
333 struct omap3_idle_statedata *cx; 359 struct omap3_idle_statedata *cx;
334 360
335 mpu_pd = pwrdm_lookup("mpu_pwrdm"); 361 mpu_pd = pwrdm_lookup("mpu_pwrdm");
@@ -337,44 +363,52 @@ int __init omap3_idle_init(void)
337 per_pd = pwrdm_lookup("per_pwrdm"); 363 per_pd = pwrdm_lookup("per_pwrdm");
338 cam_pd = pwrdm_lookup("cam_pwrdm"); 364 cam_pd = pwrdm_lookup("cam_pwrdm");
339 365
340 cpuidle_register_driver(&omap3_idle_driver); 366
367 drv->safe_state_index = -1;
341 dev = &per_cpu(omap3_idle_dev, smp_processor_id()); 368 dev = &per_cpu(omap3_idle_dev, smp_processor_id());
342 369
343 /* C1 . MPU WFI + Core active */ 370 /* C1 . MPU WFI + Core active */
344 cx = _fill_cstate(dev, 0, "MPU ON + CORE ON"); 371 _fill_cstate(drv, 0, "MPU ON + CORE ON");
345 (&dev->states[0])->enter = omap3_enter_idle; 372 (&drv->states[0])->enter = omap3_enter_idle;
346 dev->safe_state = &dev->states[0]; 373 drv->safe_state_index = 0;
374 cx = _fill_cstate_usage(dev, 0);
347 cx->valid = 1; /* C1 is always valid */ 375 cx->valid = 1; /* C1 is always valid */
348 cx->mpu_state = PWRDM_POWER_ON; 376 cx->mpu_state = PWRDM_POWER_ON;
349 cx->core_state = PWRDM_POWER_ON; 377 cx->core_state = PWRDM_POWER_ON;
350 378
351 /* C2 . MPU WFI + Core inactive */ 379 /* C2 . MPU WFI + Core inactive */
352 cx = _fill_cstate(dev, 1, "MPU ON + CORE ON"); 380 _fill_cstate(drv, 1, "MPU ON + CORE ON");
381 cx = _fill_cstate_usage(dev, 1);
353 cx->mpu_state = PWRDM_POWER_ON; 382 cx->mpu_state = PWRDM_POWER_ON;
354 cx->core_state = PWRDM_POWER_ON; 383 cx->core_state = PWRDM_POWER_ON;
355 384
356 /* C3 . MPU CSWR + Core inactive */ 385 /* C3 . MPU CSWR + Core inactive */
357 cx = _fill_cstate(dev, 2, "MPU RET + CORE ON"); 386 _fill_cstate(drv, 2, "MPU RET + CORE ON");
387 cx = _fill_cstate_usage(dev, 2);
358 cx->mpu_state = PWRDM_POWER_RET; 388 cx->mpu_state = PWRDM_POWER_RET;
359 cx->core_state = PWRDM_POWER_ON; 389 cx->core_state = PWRDM_POWER_ON;
360 390
361 /* C4 . MPU OFF + Core inactive */ 391 /* C4 . MPU OFF + Core inactive */
362 cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON"); 392 _fill_cstate(drv, 3, "MPU OFF + CORE ON");
393 cx = _fill_cstate_usage(dev, 3);
363 cx->mpu_state = PWRDM_POWER_OFF; 394 cx->mpu_state = PWRDM_POWER_OFF;
364 cx->core_state = PWRDM_POWER_ON; 395 cx->core_state = PWRDM_POWER_ON;
365 396
366 /* C5 . MPU RET + Core RET */ 397 /* C5 . MPU RET + Core RET */
367 cx = _fill_cstate(dev, 4, "MPU RET + CORE RET"); 398 _fill_cstate(drv, 4, "MPU RET + CORE RET");
399 cx = _fill_cstate_usage(dev, 4);
368 cx->mpu_state = PWRDM_POWER_RET; 400 cx->mpu_state = PWRDM_POWER_RET;
369 cx->core_state = PWRDM_POWER_RET; 401 cx->core_state = PWRDM_POWER_RET;
370 402
371 /* C6 . MPU OFF + Core RET */ 403 /* C6 . MPU OFF + Core RET */
372 cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET"); 404 _fill_cstate(drv, 5, "MPU OFF + CORE RET");
405 cx = _fill_cstate_usage(dev, 5);
373 cx->mpu_state = PWRDM_POWER_OFF; 406 cx->mpu_state = PWRDM_POWER_OFF;
374 cx->core_state = PWRDM_POWER_RET; 407 cx->core_state = PWRDM_POWER_RET;
375 408
376 /* C7 . MPU OFF + Core OFF */ 409 /* C7 . MPU OFF + Core OFF */
377 cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF"); 410 _fill_cstate(drv, 6, "MPU OFF + CORE OFF");
411 cx = _fill_cstate_usage(dev, 6);
378 /* 412 /*
379 * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot 413 * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
380 * enable OFF mode in a stable form for previous revisions. 414 * enable OFF mode in a stable form for previous revisions.
@@ -388,6 +422,9 @@ int __init omap3_idle_init(void)
388 cx->mpu_state = PWRDM_POWER_OFF; 422 cx->mpu_state = PWRDM_POWER_OFF;
389 cx->core_state = PWRDM_POWER_OFF; 423 cx->core_state = PWRDM_POWER_OFF;
390 424
425 drv->state_count = OMAP3_NUM_STATES;
426 cpuidle_register_driver(&omap3_idle_driver);
427
391 dev->state_count = OMAP3_NUM_STATES; 428 dev->state_count = OMAP3_NUM_STATES;
392 if (cpuidle_register_device(dev)) { 429 if (cpuidle_register_device(dev)) {
393 printk(KERN_ERR "%s: CPUidle register device failed\n", 430 printk(KERN_ERR "%s: CPUidle register device failed\n",
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 68ec03152d5f..c15cfada5f13 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -318,18 +318,10 @@ static inline void omap_init_audio(void) {}
318#if defined(CONFIG_SND_OMAP_SOC_MCPDM) || \ 318#if defined(CONFIG_SND_OMAP_SOC_MCPDM) || \
319 defined(CONFIG_SND_OMAP_SOC_MCPDM_MODULE) 319 defined(CONFIG_SND_OMAP_SOC_MCPDM_MODULE)
320 320
321static struct omap_device_pm_latency omap_mcpdm_latency[] = {
322 {
323 .deactivate_func = omap_device_idle_hwmods,
324 .activate_func = omap_device_enable_hwmods,
325 .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
326 },
327};
328
329static void omap_init_mcpdm(void) 321static void omap_init_mcpdm(void)
330{ 322{
331 struct omap_hwmod *oh; 323 struct omap_hwmod *oh;
332 struct omap_device *od; 324 struct platform_device *pdev;
333 325
334 oh = omap_hwmod_lookup("mcpdm"); 326 oh = omap_hwmod_lookup("mcpdm");
335 if (!oh) { 327 if (!oh) {
@@ -337,11 +329,8 @@ static void omap_init_mcpdm(void)
337 return; 329 return;
338 } 330 }
339 331
340 od = omap_device_build("omap-mcpdm", -1, oh, NULL, 0, 332 pdev = omap_device_build("omap-mcpdm", -1, oh, NULL, 0, NULL, 0, 0);
341 omap_mcpdm_latency, 333 WARN(IS_ERR(pdev), "Can't build omap_device for omap-mcpdm.\n");
342 ARRAY_SIZE(omap_mcpdm_latency), 0);
343 if (IS_ERR(od))
344 printk(KERN_ERR "Could not build omap_device for omap-mcpdm-dai\n");
345} 334}
346#else 335#else
347static inline void omap_init_mcpdm(void) {} 336static inline void omap_init_mcpdm(void) {}
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index f77022be783d..fc56745676fa 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -390,7 +390,8 @@ int omap3_noncore_dpll_enable(struct clk *clk)
390 * propagating? 390 * propagating?
391 */ 391 */
392 if (!r) 392 if (!r)
393 clk->rate = omap2_get_dpll_rate(clk); 393 clk->rate = (clk->recalc) ? clk->recalc(clk) :
394 omap2_get_dpll_rate(clk);
394 395
395 return r; 396 return r;
396} 397}
@@ -424,6 +425,7 @@ void omap3_noncore_dpll_disable(struct clk *clk)
424int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate) 425int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate)
425{ 426{
426 struct clk *new_parent = NULL; 427 struct clk *new_parent = NULL;
428 unsigned long hw_rate;
427 u16 freqsel = 0; 429 u16 freqsel = 0;
428 struct dpll_data *dd; 430 struct dpll_data *dd;
429 int ret; 431 int ret;
@@ -435,7 +437,8 @@ int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate)
435 if (!dd) 437 if (!dd)
436 return -EINVAL; 438 return -EINVAL;
437 439
438 if (rate == omap2_get_dpll_rate(clk)) 440 hw_rate = (clk->recalc) ? clk->recalc(clk) : omap2_get_dpll_rate(clk);
441 if (rate == hw_rate)
439 return 0; 442 return 0;
440 443
441 /* 444 /*
@@ -455,7 +458,7 @@ int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate)
455 new_parent = dd->clk_bypass; 458 new_parent = dd->clk_bypass;
456 } else { 459 } else {
457 if (dd->last_rounded_rate != rate) 460 if (dd->last_rounded_rate != rate)
458 omap2_dpll_round_rate(clk, rate); 461 rate = clk->round_rate(clk, rate);
459 462
460 if (dd->last_rounded_rate == 0) 463 if (dd->last_rounded_rate == 0)
461 return -EINVAL; 464 return -EINVAL;
diff --git a/arch/arm/mach-omap2/dpll44xx.c b/arch/arm/mach-omap2/dpll44xx.c
index 4e4da6160d05..9c6a296b3dc3 100644
--- a/arch/arm/mach-omap2/dpll44xx.c
+++ b/arch/arm/mach-omap2/dpll44xx.c
@@ -19,6 +19,7 @@
19#include <plat/clock.h> 19#include <plat/clock.h>
20 20
21#include "clock.h" 21#include "clock.h"
22#include "clock44xx.h"
22#include "cm-regbits-44xx.h" 23#include "cm-regbits-44xx.h"
23 24
24/* Supported only on OMAP4 */ 25/* Supported only on OMAP4 */
@@ -82,3 +83,71 @@ const struct clkops clkops_omap4_dpllmx_ops = {
82 .deny_idle = omap4_dpllmx_deny_gatectrl, 83 .deny_idle = omap4_dpllmx_deny_gatectrl,
83}; 84};
84 85
86/**
87 * omap4_dpll_regm4xen_recalc - compute DPLL rate, considering REGM4XEN bit
88 * @clk: struct clk * of the DPLL to compute the rate for
89 *
90 * Compute the output rate for the OMAP4 DPLL represented by @clk.
91 * Takes the REGM4XEN bit into consideration, which is needed for the
92 * OMAP4 ABE DPLL. Returns the DPLL's output rate (before M-dividers)
93 * upon success, or 0 upon error.
94 */
95unsigned long omap4_dpll_regm4xen_recalc(struct clk *clk)
96{
97 u32 v;
98 unsigned long rate;
99 struct dpll_data *dd;
100
101 if (!clk || !clk->dpll_data)
102 return 0;
103
104 dd = clk->dpll_data;
105
106 rate = omap2_get_dpll_rate(clk);
107
108 /* regm4xen adds a multiplier of 4 to DPLL calculations */
109 v = __raw_readl(dd->control_reg);
110 if (v & OMAP4430_DPLL_REGM4XEN_MASK)
111 rate *= OMAP4430_REGM4XEN_MULT;
112
113 return rate;
114}
115
116/**
117 * omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit
118 * @clk: struct clk * of the DPLL to round a rate for
119 * @target_rate: the desired rate of the DPLL
120 *
121 * Compute the rate that would be programmed into the DPLL hardware
122 * for @clk if set_rate() were to be provided with the rate
123 * @target_rate. Takes the REGM4XEN bit into consideration, which is
124 * needed for the OMAP4 ABE DPLL. Returns the rounded rate (before
125 * M-dividers) upon success, -EINVAL if @clk is null or not a DPLL, or
126 * ~0 if an error occurred in omap2_dpll_round_rate().
127 */
128long omap4_dpll_regm4xen_round_rate(struct clk *clk, unsigned long target_rate)
129{
130 u32 v;
131 struct dpll_data *dd;
132 long r;
133
134 if (!clk || !clk->dpll_data)
135 return -EINVAL;
136
137 dd = clk->dpll_data;
138
139 /* regm4xen adds a multiplier of 4 to DPLL calculations */
140 v = __raw_readl(dd->control_reg) & OMAP4430_DPLL_REGM4XEN_MASK;
141
142 if (v)
143 target_rate = target_rate / OMAP4430_REGM4XEN_MULT;
144
145 r = omap2_dpll_round_rate(clk, target_rate);
146 if (r == ~0)
147 return r;
148
149 if (v)
150 clk->dpll_data->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
151
152 return clk->dpll_data->last_rounded_rate;
153}
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c
index 911cd2e68d46..74f18f2952df 100644
--- a/arch/arm/mach-omap2/dsp.c
+++ b/arch/arm/mach-omap2/dsp.c
@@ -18,6 +18,7 @@
18 * of the OMAP PM core code. 18 * of the OMAP PM core code.
19 */ 19 */
20 20
21#include <linux/module.h>
21#include <linux/platform_device.h> 22#include <linux/platform_device.h>
22#include "cm2xxx_3xxx.h" 23#include "cm2xxx_3xxx.h"
23#include "prm2xxx_3xxx.h" 24#include "prm2xxx_3xxx.h"
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index 77085847e4e7..f4a1020559a7 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -129,15 +129,11 @@ static void omap4_hsmmc1_before_set_reg(struct device *dev, int slot,
129 * Assume we power both OMAP VMMC1 (for CMD, CLK, DAT0..3) and the 129 * Assume we power both OMAP VMMC1 (for CMD, CLK, DAT0..3) and the
130 * card with Vcc regulator (from twl4030 or whatever). OMAP has both 130 * card with Vcc regulator (from twl4030 or whatever). OMAP has both
131 * 1.8V and 3.0V modes, controlled by the PBIAS register. 131 * 1.8V and 3.0V modes, controlled by the PBIAS register.
132 *
133 * In 8-bit modes, OMAP VMMC1A (for DAT4..7) needs a supply, which
134 * is most naturally TWL VSIM; those pins also use PBIAS.
135 *
136 * FIXME handle VMMC1A as needed ...
137 */ 132 */
138 reg = omap4_ctrl_pad_readl(control_pbias_offset); 133 reg = omap4_ctrl_pad_readl(control_pbias_offset);
139 reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK | 134 reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
140 OMAP4_MMC1_PWRDNZ_MASK); 135 OMAP4_MMC1_PWRDNZ_MASK |
136 OMAP4_MMC1_PBIASLITE_VMODE_MASK);
141 omap4_ctrl_pad_writel(reg, control_pbias_offset); 137 omap4_ctrl_pad_writel(reg, control_pbias_offset);
142} 138}
143 139
@@ -172,12 +168,6 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
172 reg &= ~(OMAP4_MMC1_PWRDNZ_MASK); 168 reg &= ~(OMAP4_MMC1_PWRDNZ_MASK);
173 omap4_ctrl_pad_writel(reg, control_pbias_offset); 169 omap4_ctrl_pad_writel(reg, control_pbias_offset);
174 } 170 }
175 } else {
176 reg = omap4_ctrl_pad_readl(control_pbias_offset);
177 reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
178 OMAP4_MMC1_PWRDNZ_MASK |
179 OMAP4_MMC1_PBIASLITE_VMODE_MASK);
180 omap4_ctrl_pad_writel(reg, control_pbias_offset);
181 } 171 }
182} 172}
183 173
@@ -489,7 +479,7 @@ void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
489 OMAP4_SDMMC1_PUSTRENGTH_GRP1_MASK); 479 OMAP4_SDMMC1_PUSTRENGTH_GRP1_MASK);
490 reg &= ~(OMAP4_SDMMC1_PUSTRENGTH_GRP2_MASK | 480 reg &= ~(OMAP4_SDMMC1_PUSTRENGTH_GRP2_MASK |
491 OMAP4_SDMMC1_PUSTRENGTH_GRP3_MASK); 481 OMAP4_SDMMC1_PUSTRENGTH_GRP3_MASK);
492 reg |= (OMAP4_USBC1_DR0_SPEEDCTRL_MASK| 482 reg |= (OMAP4_SDMMC1_DR0_SPEEDCTRL_MASK |
493 OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK | 483 OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK |
494 OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK); 484 OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK);
495 omap4_ctrl_pad_writel(reg, control_mmc1); 485 omap4_ctrl_pad_writel(reg, control_mmc1);
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index d27daf921c7e..7f47092a193f 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -187,8 +187,11 @@ static void __init omap3_check_features(void)
187 OMAP3_CHECK_FEATURE(status, ISP); 187 OMAP3_CHECK_FEATURE(status, ISP);
188 if (cpu_is_omap3630()) 188 if (cpu_is_omap3630())
189 omap_features |= OMAP3_HAS_192MHZ_CLK; 189 omap_features |= OMAP3_HAS_192MHZ_CLK;
190 if (!cpu_is_omap3505() && !cpu_is_omap3517()) 190 if (cpu_is_omap3430() || cpu_is_omap3630())
191 omap_features |= OMAP3_HAS_IO_WAKEUP; 191 omap_features |= OMAP3_HAS_IO_WAKEUP;
192 if (cpu_is_omap3630() || omap_rev() == OMAP3430_REV_ES3_1 ||
193 omap_rev() == OMAP3430_REV_ES3_1_2)
194 omap_features |= OMAP3_HAS_IO_CHAIN_CTRL;
192 195
193 omap_features |= OMAP3_HAS_SDRC; 196 omap_features |= OMAP3_HAS_SDRC;
194 197
diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
index c88420de1151..1e2d3322f33e 100644
--- a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
+++ b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
@@ -941,10 +941,10 @@
941#define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29) 941#define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29)
942#define OMAP4_DSI1_LANEENABLE_SHIFT 24 942#define OMAP4_DSI1_LANEENABLE_SHIFT 24
943#define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24) 943#define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24)
944#define OMAP4_DSI1_PIPD_SHIFT 19 944#define OMAP4_DSI2_PIPD_SHIFT 19
945#define OMAP4_DSI1_PIPD_MASK (0x1f << 19) 945#define OMAP4_DSI2_PIPD_MASK (0x1f << 19)
946#define OMAP4_DSI2_PIPD_SHIFT 14 946#define OMAP4_DSI1_PIPD_SHIFT 14
947#define OMAP4_DSI2_PIPD_MASK (0x1f << 14) 947#define OMAP4_DSI1_PIPD_MASK (0x1f << 14)
948 948
949/* CONTROL_MCBSPLP */ 949/* CONTROL_MCBSPLP */
950#define OMAP4_ALBCTRLRX_FSX_SHIFT 31 950#define OMAP4_ALBCTRLRX_FSX_SHIFT 31
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index a5d8dce2a70b..25d20ced03e1 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -359,6 +359,7 @@ static void __init omap_hwmod_init_postsetup(void)
359 omap_pm_if_early_init(); 359 omap_pm_if_early_init();
360} 360}
361 361
362#ifdef CONFIG_ARCH_OMAP2
362void __init omap2420_init_early(void) 363void __init omap2420_init_early(void)
363{ 364{
364 omap2_set_globals_242x(); 365 omap2_set_globals_242x();
@@ -382,11 +383,13 @@ void __init omap2430_init_early(void)
382 omap_hwmod_init_postsetup(); 383 omap_hwmod_init_postsetup();
383 omap2430_clk_init(); 384 omap2430_clk_init();
384} 385}
386#endif
385 387
386/* 388/*
387 * Currently only board-omap3beagle.c should call this because of the 389 * Currently only board-omap3beagle.c should call this because of the
388 * same machine_id for 34xx and 36xx beagle.. Will get fixed with DT. 390 * same machine_id for 34xx and 36xx beagle.. Will get fixed with DT.
389 */ 391 */
392#ifdef CONFIG_ARCH_OMAP3
390void __init omap3_init_early(void) 393void __init omap3_init_early(void)
391{ 394{
392 omap2_set_globals_3xxx(); 395 omap2_set_globals_3xxx();
@@ -430,7 +433,9 @@ void __init ti816x_init_early(void)
430 omap_hwmod_init_postsetup(); 433 omap_hwmod_init_postsetup();
431 omap3xxx_clk_init(); 434 omap3xxx_clk_init();
432} 435}
436#endif
433 437
438#ifdef CONFIG_ARCH_OMAP4
434void __init omap4430_init_early(void) 439void __init omap4430_init_early(void)
435{ 440{
436 omap2_set_globals_443x(); 441 omap2_set_globals_443x();
@@ -442,6 +447,7 @@ void __init omap4430_init_early(void)
442 omap_hwmod_init_postsetup(); 447 omap_hwmod_init_postsetup();
443 omap4xxx_clk_init(); 448 omap4xxx_clk_init();
444} 449}
450#endif
445 451
446void __init omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0, 452void __init omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
447 struct omap_sdrc_params *sdrc_cs1) 453 struct omap_sdrc_params *sdrc_cs1)
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 86d564a640bb..609ea2ded7e3 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -10,6 +10,7 @@
10 * for more details. 10 * for more details.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/clk.h> 14#include <linux/clk.h>
14#include <linux/err.h> 15#include <linux/err.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index e61feadcda4e..b8822048e409 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14 15
15#include <plat/iommu.h> 16#include <plat/iommu.h>
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index d71380705080..6b3088db83b7 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2625,7 +2625,7 @@ ohsps_unlock:
2625 * Returns the context loss count of the powerdomain assocated with @oh 2625 * Returns the context loss count of the powerdomain assocated with @oh
2626 * upon success, or zero if no powerdomain exists for @oh. 2626 * upon success, or zero if no powerdomain exists for @oh.
2627 */ 2627 */
2628u32 omap_hwmod_get_context_loss_count(struct omap_hwmod *oh) 2628int omap_hwmod_get_context_loss_count(struct omap_hwmod *oh)
2629{ 2629{
2630 struct powerdomain *pwrdm; 2630 struct powerdomain *pwrdm;
2631 int ret = 0; 2631 int ret = 0;
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 3008e1672c7a..bc9035ec87fc 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -3159,7 +3159,6 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
3159 &omap3xxx_mmc2_hwmod, 3159 &omap3xxx_mmc2_hwmod,
3160 &omap3xxx_mmc3_hwmod, 3160 &omap3xxx_mmc3_hwmod,
3161 &omap3xxx_mpu_hwmod, 3161 &omap3xxx_mpu_hwmod,
3162 &omap3xxx_iva_hwmod,
3163 3162
3164 &omap3xxx_timer1_hwmod, 3163 &omap3xxx_timer1_hwmod,
3165 &omap3xxx_timer2_hwmod, 3164 &omap3xxx_timer2_hwmod,
@@ -3188,8 +3187,6 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
3188 &omap3xxx_i2c1_hwmod, 3187 &omap3xxx_i2c1_hwmod,
3189 &omap3xxx_i2c2_hwmod, 3188 &omap3xxx_i2c2_hwmod,
3190 &omap3xxx_i2c3_hwmod, 3189 &omap3xxx_i2c3_hwmod,
3191 &omap34xx_sr1_hwmod,
3192 &omap34xx_sr2_hwmod,
3193 3190
3194 /* gpio class */ 3191 /* gpio class */
3195 &omap3xxx_gpio1_hwmod, 3192 &omap3xxx_gpio1_hwmod,
@@ -3211,8 +3208,6 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
3211 &omap3xxx_mcbsp2_sidetone_hwmod, 3208 &omap3xxx_mcbsp2_sidetone_hwmod,
3212 &omap3xxx_mcbsp3_sidetone_hwmod, 3209 &omap3xxx_mcbsp3_sidetone_hwmod,
3213 3210
3214 /* mailbox class */
3215 &omap3xxx_mailbox_hwmod,
3216 3211
3217 /* mcspi class */ 3212 /* mcspi class */
3218 &omap34xx_mcspi1, 3213 &omap34xx_mcspi1,
@@ -3225,31 +3220,39 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
3225 3220
3226/* 3430ES1-only hwmods */ 3221/* 3430ES1-only hwmods */
3227static __initdata struct omap_hwmod *omap3430es1_hwmods[] = { 3222static __initdata struct omap_hwmod *omap3430es1_hwmods[] = {
3223 &omap3xxx_iva_hwmod,
3228 &omap3430es1_dss_core_hwmod, 3224 &omap3430es1_dss_core_hwmod,
3225 &omap3xxx_mailbox_hwmod,
3229 NULL 3226 NULL
3230}; 3227};
3231 3228
3232/* 3430ES2+-only hwmods */ 3229/* 3430ES2+-only hwmods */
3233static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = { 3230static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = {
3231 &omap3xxx_iva_hwmod,
3234 &omap3xxx_dss_core_hwmod, 3232 &omap3xxx_dss_core_hwmod,
3235 &omap3xxx_usbhsotg_hwmod, 3233 &omap3xxx_usbhsotg_hwmod,
3234 &omap3xxx_mailbox_hwmod,
3236 NULL 3235 NULL
3237}; 3236};
3238 3237
3239/* 34xx-only hwmods (all ES revisions) */ 3238/* 34xx-only hwmods (all ES revisions) */
3240static __initdata struct omap_hwmod *omap34xx_hwmods[] = { 3239static __initdata struct omap_hwmod *omap34xx_hwmods[] = {
3240 &omap3xxx_iva_hwmod,
3241 &omap34xx_sr1_hwmod, 3241 &omap34xx_sr1_hwmod,
3242 &omap34xx_sr2_hwmod, 3242 &omap34xx_sr2_hwmod,
3243 &omap3xxx_mailbox_hwmod,
3243 NULL 3244 NULL
3244}; 3245};
3245 3246
3246/* 36xx-only hwmods (all ES revisions) */ 3247/* 36xx-only hwmods (all ES revisions) */
3247static __initdata struct omap_hwmod *omap36xx_hwmods[] = { 3248static __initdata struct omap_hwmod *omap36xx_hwmods[] = {
3249 &omap3xxx_iva_hwmod,
3248 &omap3xxx_uart4_hwmod, 3250 &omap3xxx_uart4_hwmod,
3249 &omap3xxx_dss_core_hwmod, 3251 &omap3xxx_dss_core_hwmod,
3250 &omap36xx_sr1_hwmod, 3252 &omap36xx_sr1_hwmod,
3251 &omap36xx_sr2_hwmod, 3253 &omap36xx_sr2_hwmod,
3252 &omap3xxx_usbhsotg_hwmod, 3254 &omap3xxx_usbhsotg_hwmod,
3255 &omap3xxx_mailbox_hwmod,
3253 NULL 3256 NULL
3254}; 3257};
3255 3258
@@ -3267,7 +3270,7 @@ int __init omap3xxx_hwmod_init(void)
3267 3270
3268 /* Register hwmods common to all OMAP3 */ 3271 /* Register hwmods common to all OMAP3 */
3269 r = omap_hwmod_register(omap3xxx_hwmods); 3272 r = omap_hwmod_register(omap3xxx_hwmods);
3270 if (!r) 3273 if (r < 0)
3271 return r; 3274 return r;
3272 3275
3273 rev = omap_rev(); 3276 rev = omap_rev();
@@ -3292,7 +3295,7 @@ int __init omap3xxx_hwmod_init(void)
3292 }; 3295 };
3293 3296
3294 r = omap_hwmod_register(h); 3297 r = omap_hwmod_register(h);
3295 if (!r) 3298 if (r < 0)
3296 return r; 3299 return r;
3297 3300
3298 /* 3301 /*
diff --git a/arch/arm/mach-omap2/omap_l3_noc.c b/arch/arm/mach-omap2/omap_l3_noc.c
index c8b1bef92e5a..6a66aa5e2a5b 100644
--- a/arch/arm/mach-omap2/omap_l3_noc.c
+++ b/arch/arm/mach-omap2/omap_l3_noc.c
@@ -20,6 +20,7 @@
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
21 * USA 21 * USA
22 */ 22 */
23#include <linux/module.h>
23#include <linux/init.h> 24#include <linux/init.h>
24#include <linux/io.h> 25#include <linux/io.h>
25#include <linux/platform_device.h> 26#include <linux/platform_device.h>
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index c8cbd00a41af..efa66494c1e3 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -99,31 +99,27 @@ static void omap3_enable_io_chain(void)
99{ 99{
100 int timeout = 0; 100 int timeout = 0;
101 101
102 if (omap_rev() >= OMAP3430_REV_ES3_1) { 102 omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
103 omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD, 103 PM_WKEN);
104 PM_WKEN); 104 /* Do a readback to assure write has been done */
105 /* Do a readback to assure write has been done */ 105 omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
106 omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN); 106
107 107 while (!(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
108 while (!(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN) & 108 OMAP3430_ST_IO_CHAIN_MASK)) {
109 OMAP3430_ST_IO_CHAIN_MASK)) { 109 timeout++;
110 timeout++; 110 if (timeout > 1000) {
111 if (timeout > 1000) { 111 pr_err("Wake up daisy chain activation failed.\n");
112 printk(KERN_ERR "Wake up daisy chain " 112 return;
113 "activation failed.\n");
114 return;
115 }
116 omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
117 WKUP_MOD, PM_WKEN);
118 } 113 }
114 omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
115 WKUP_MOD, PM_WKEN);
119 } 116 }
120} 117}
121 118
122static void omap3_disable_io_chain(void) 119static void omap3_disable_io_chain(void)
123{ 120{
124 if (omap_rev() >= OMAP3430_REV_ES3_1) 121 omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
125 omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD, 122 PM_WKEN);
126 PM_WKEN);
127} 123}
128 124
129static void omap3_core_save_context(void) 125static void omap3_core_save_context(void)
@@ -363,7 +359,6 @@ void omap_sram_idle(void)
363 printk(KERN_ERR "Invalid mpu state in sram_idle\n"); 359 printk(KERN_ERR "Invalid mpu state in sram_idle\n");
364 return; 360 return;
365 } 361 }
366 pwrdm_pre_transition();
367 362
368 /* NEON control */ 363 /* NEON control */
369 if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON) 364 if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
@@ -376,7 +371,8 @@ void omap_sram_idle(void)
376 (per_next_state < PWRDM_POWER_ON || 371 (per_next_state < PWRDM_POWER_ON ||
377 core_next_state < PWRDM_POWER_ON)) { 372 core_next_state < PWRDM_POWER_ON)) {
378 omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN); 373 omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
379 omap3_enable_io_chain(); 374 if (omap3_has_io_chain_ctrl())
375 omap3_enable_io_chain();
380 } 376 }
381 377
382 /* Block console output in case it is on one of the OMAP UARTs */ 378 /* Block console output in case it is on one of the OMAP UARTs */
@@ -386,6 +382,8 @@ void omap_sram_idle(void)
386 if (!console_trylock()) 382 if (!console_trylock())
387 goto console_still_active; 383 goto console_still_active;
388 384
385 pwrdm_pre_transition();
386
389 /* PER */ 387 /* PER */
390 if (per_next_state < PWRDM_POWER_ON) { 388 if (per_next_state < PWRDM_POWER_ON) {
391 per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0; 389 per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
@@ -409,13 +407,14 @@ void omap_sram_idle(void)
409 omap3_intc_prepare_idle(); 407 omap3_intc_prepare_idle();
410 408
411 /* 409 /*
412 * On EMU/HS devices ROM code restores a SRDC value 410 * On EMU/HS devices ROM code restores a SRDC value
413 * from scratchpad which has automatic self refresh on timeout 411 * from scratchpad which has automatic self refresh on timeout
414 * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443. 412 * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
415 * Hence store/restore the SDRC_POWER register here. 413 * Hence store/restore the SDRC_POWER register here.
416 */ 414 */
417 if (omap_rev() >= OMAP3430_REV_ES3_0 && 415 if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
418 omap_type() != OMAP2_DEVICE_TYPE_GP && 416 (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
417 omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
419 core_next_state == PWRDM_POWER_OFF) 418 core_next_state == PWRDM_POWER_OFF)
420 sdrc_pwr = sdrc_read_reg(SDRC_POWER); 419 sdrc_pwr = sdrc_read_reg(SDRC_POWER);
421 420
@@ -432,8 +431,9 @@ void omap_sram_idle(void)
432 omap34xx_do_sram_idle(save_state); 431 omap34xx_do_sram_idle(save_state);
433 432
434 /* Restore normal SDRC POWER settings */ 433 /* Restore normal SDRC POWER settings */
435 if (omap_rev() >= OMAP3430_REV_ES3_0 && 434 if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
436 omap_type() != OMAP2_DEVICE_TYPE_GP && 435 (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
436 omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
437 core_next_state == PWRDM_POWER_OFF) 437 core_next_state == PWRDM_POWER_OFF)
438 sdrc_write_reg(sdrc_pwr, SDRC_POWER); 438 sdrc_write_reg(sdrc_pwr, SDRC_POWER);
439 439
@@ -455,6 +455,8 @@ void omap_sram_idle(void)
455 } 455 }
456 omap3_intc_resume_idle(); 456 omap3_intc_resume_idle();
457 457
458 pwrdm_post_transition();
459
458 /* PER */ 460 /* PER */
459 if (per_next_state < PWRDM_POWER_ON) { 461 if (per_next_state < PWRDM_POWER_ON) {
460 per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm); 462 per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
@@ -475,11 +477,10 @@ console_still_active:
475 core_next_state < PWRDM_POWER_ON)) { 477 core_next_state < PWRDM_POWER_ON)) {
476 omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, 478 omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
477 PM_WKEN); 479 PM_WKEN);
478 omap3_disable_io_chain(); 480 if (omap3_has_io_chain_ctrl())
481 omap3_disable_io_chain();
479 } 482 }
480 483
481 pwrdm_post_transition();
482
483 clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]); 484 clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
484} 485}
485 486
@@ -870,6 +871,9 @@ static int __init omap3_pm_init(void)
870 if (!cpu_is_omap34xx()) 871 if (!cpu_is_omap34xx())
871 return -ENODEV; 872 return -ENODEV;
872 873
874 if (!omap3_has_io_chain_ctrl())
875 pr_warning("PM: no software I/O chain control; some wakeups may be lost\n");
876
873 pm_errata_configure(); 877 pm_errata_configure();
874 878
875 /* XXX prcm_setup_regs needs to be before enabling hw 879 /* XXX prcm_setup_regs needs to be before enabling hw
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 5164d587ef52..8a18d1bd61c8 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -1002,16 +1002,16 @@ int pwrdm_post_transition(void)
1002 * @pwrdm: struct powerdomain * to wait for 1002 * @pwrdm: struct powerdomain * to wait for
1003 * 1003 *
1004 * Context loss count is the sum of powerdomain off-mode counter, the 1004 * Context loss count is the sum of powerdomain off-mode counter, the
1005 * logic off counter and the per-bank memory off counter. Returns 0 1005 * logic off counter and the per-bank memory off counter. Returns negative
1006 * (and WARNs) upon error, otherwise, returns the context loss count. 1006 * (and WARNs) upon error, otherwise, returns the context loss count.
1007 */ 1007 */
1008u32 pwrdm_get_context_loss_count(struct powerdomain *pwrdm) 1008int pwrdm_get_context_loss_count(struct powerdomain *pwrdm)
1009{ 1009{
1010 int i, count; 1010 int i, count;
1011 1011
1012 if (!pwrdm) { 1012 if (!pwrdm) {
1013 WARN(1, "powerdomain: %s: pwrdm is null\n", __func__); 1013 WARN(1, "powerdomain: %s: pwrdm is null\n", __func__);
1014 return 0; 1014 return -ENODEV;
1015 } 1015 }
1016 1016
1017 count = pwrdm->state_counter[PWRDM_POWER_OFF]; 1017 count = pwrdm->state_counter[PWRDM_POWER_OFF];
@@ -1020,7 +1020,13 @@ u32 pwrdm_get_context_loss_count(struct powerdomain *pwrdm)
1020 for (i = 0; i < pwrdm->banks; i++) 1020 for (i = 0; i < pwrdm->banks; i++)
1021 count += pwrdm->ret_mem_off_counter[i]; 1021 count += pwrdm->ret_mem_off_counter[i];
1022 1022
1023 pr_debug("powerdomain: %s: context loss count = %u\n", 1023 /*
1024 * Context loss count has to be a non-negative value. Clear the sign
1025 * bit to get a value range from 0 to INT_MAX.
1026 */
1027 count &= INT_MAX;
1028
1029 pr_debug("powerdomain: %s: context loss count = %d\n",
1024 pwrdm->name, count); 1030 pwrdm->name, count);
1025 1031
1026 return count; 1032 return count;
diff --git a/arch/arm/mach-omap2/powerdomain.h b/arch/arm/mach-omap2/powerdomain.h
index 42e6dd8f2a78..0d72a8a8ce4d 100644
--- a/arch/arm/mach-omap2/powerdomain.h
+++ b/arch/arm/mach-omap2/powerdomain.h
@@ -217,7 +217,7 @@ int pwrdm_clkdm_state_switch(struct clockdomain *clkdm);
217int pwrdm_pre_transition(void); 217int pwrdm_pre_transition(void);
218int pwrdm_post_transition(void); 218int pwrdm_post_transition(void);
219int pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm); 219int pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm);
220u32 pwrdm_get_context_loss_count(struct powerdomain *pwrdm); 220int pwrdm_get_context_loss_count(struct powerdomain *pwrdm);
221bool pwrdm_can_ever_lose_context(struct powerdomain *pwrdm); 221bool pwrdm_can_ever_lose_context(struct powerdomain *pwrdm);
222 222
223extern void omap242x_powerdomains_init(void); 223extern void omap242x_powerdomains_init(void);
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index 0347b93211e6..6a4f6839a7d9 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -17,6 +17,7 @@
17 * published by the Free Software Foundation. 17 * published by the Free Software Foundation.
18 */ 18 */
19 19
20#include <linux/module.h>
20#include <linux/interrupt.h> 21#include <linux/interrupt.h>
21#include <linux/clk.h> 22#include <linux/clk.h>
22#include <linux/io.h> 23#include <linux/io.h>
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index e49fc7be2229..037b0d7d4e05 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -408,14 +408,6 @@ static int omap2_dm_timer_set_src(struct platform_device *pdev, int source)
408 return ret; 408 return ret;
409} 409}
410 410
411struct omap_device_pm_latency omap2_dmtimer_latency[] = {
412 {
413 .deactivate_func = omap_device_idle_hwmods,
414 .activate_func = omap_device_enable_hwmods,
415 .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
416 },
417};
418
419/** 411/**
420 * omap_timer_init - build and register timer device with an 412 * omap_timer_init - build and register timer device with an
421 * associated timer hwmod 413 * associated timer hwmod
@@ -477,9 +469,7 @@ static int __init omap_timer_init(struct omap_hwmod *oh, void *unused)
477 pdata->get_context_loss_count = omap_pm_get_dev_context_loss_count; 469 pdata->get_context_loss_count = omap_pm_get_dev_context_loss_count;
478#endif 470#endif
479 pdev = omap_device_build(name, id, oh, pdata, sizeof(*pdata), 471 pdev = omap_device_build(name, id, oh, pdata, sizeof(*pdata),
480 omap2_dmtimer_latency, 472 NULL, 0, 0);
481 ARRAY_SIZE(omap2_dmtimer_latency),
482 0);
483 473
484 if (IS_ERR(pdev)) { 474 if (IS_ERR(pdev)) {
485 pr_err("%s: Can't build omap_device for %s: %s.\n", 475 pr_err("%s: Can't build omap_device for %s: %s.\n",
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 47fb5d607630..267975086a7b 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -60,44 +60,6 @@ static struct musb_hdrc_platform_data musb_plat = {
60 60
61static u64 musb_dmamask = DMA_BIT_MASK(32); 61static u64 musb_dmamask = DMA_BIT_MASK(32);
62 62
63static void usb_musb_mux_init(struct omap_musb_board_data *board_data)
64{
65 switch (board_data->interface_type) {
66 case MUSB_INTERFACE_UTMI:
67 omap_mux_init_signal("usba0_otg_dp", OMAP_PIN_INPUT);
68 omap_mux_init_signal("usba0_otg_dm", OMAP_PIN_INPUT);
69 break;
70 case MUSB_INTERFACE_ULPI:
71 omap_mux_init_signal("usba0_ulpiphy_clk",
72 OMAP_PIN_INPUT_PULLDOWN);
73 omap_mux_init_signal("usba0_ulpiphy_stp",
74 OMAP_PIN_INPUT_PULLDOWN);
75 omap_mux_init_signal("usba0_ulpiphy_dir",
76 OMAP_PIN_INPUT_PULLDOWN);
77 omap_mux_init_signal("usba0_ulpiphy_nxt",
78 OMAP_PIN_INPUT_PULLDOWN);
79 omap_mux_init_signal("usba0_ulpiphy_dat0",
80 OMAP_PIN_INPUT_PULLDOWN);
81 omap_mux_init_signal("usba0_ulpiphy_dat1",
82 OMAP_PIN_INPUT_PULLDOWN);
83 omap_mux_init_signal("usba0_ulpiphy_dat2",
84 OMAP_PIN_INPUT_PULLDOWN);
85 omap_mux_init_signal("usba0_ulpiphy_dat3",
86 OMAP_PIN_INPUT_PULLDOWN);
87 omap_mux_init_signal("usba0_ulpiphy_dat4",
88 OMAP_PIN_INPUT_PULLDOWN);
89 omap_mux_init_signal("usba0_ulpiphy_dat5",
90 OMAP_PIN_INPUT_PULLDOWN);
91 omap_mux_init_signal("usba0_ulpiphy_dat6",
92 OMAP_PIN_INPUT_PULLDOWN);
93 omap_mux_init_signal("usba0_ulpiphy_dat7",
94 OMAP_PIN_INPUT_PULLDOWN);
95 break;
96 default:
97 break;
98 }
99}
100
101static struct omap_musb_board_data musb_default_board_data = { 63static struct omap_musb_board_data musb_default_board_data = {
102 .interface_type = MUSB_INTERFACE_ULPI, 64 .interface_type = MUSB_INTERFACE_ULPI,
103 .mode = MUSB_OTG, 65 .mode = MUSB_OTG,
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index 6c75cd35c4c8..b35e2005a348 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -275,7 +275,7 @@ static struct platform_nand_data ts78xx_ts_nand_data = {
275 .partitions = ts78xx_ts_nand_parts, 275 .partitions = ts78xx_ts_nand_parts,
276 .nr_partitions = ARRAY_SIZE(ts78xx_ts_nand_parts), 276 .nr_partitions = ARRAY_SIZE(ts78xx_ts_nand_parts),
277 .chip_delay = 15, 277 .chip_delay = 15,
278 .options = NAND_USE_FLASH_BBT, 278 .bbt_options = NAND_BBT_USE_FLASH,
279 }, 279 },
280 .ctrl = { 280 .ctrl = {
281 /* 281 /*
diff --git a/arch/arm/mach-picoxcell/include/mach/debug-macro.S b/arch/arm/mach-picoxcell/include/mach/debug-macro.S
index 8f2c234ed9d9..58d4ee3ae949 100644
--- a/arch/arm/mach-picoxcell/include/mach/debug-macro.S
+++ b/arch/arm/mach-picoxcell/include/mach/debug-macro.S
@@ -14,7 +14,7 @@
14 14
15#define UART_SHIFT 2 15#define UART_SHIFT 2
16 16
17 .macro addruart, rp, rv 17 .macro addruart, rp, rv, tmp
18 ldr \rv, =PHYS_TO_IO(PICOXCELL_UART1_BASE) 18 ldr \rv, =PHYS_TO_IO(PICOXCELL_UART1_BASE)
19 ldr \rp, =PICOXCELL_UART1_BASE 19 ldr \rp, =PICOXCELL_UART1_BASE
20 .endm 20 .endm
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index 21ac8e3e2f7a..684acf6ed3d5 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -424,8 +424,9 @@ static struct mtd_partition cm_x300_nand_partitions[] = {
424static struct pxa3xx_nand_platform_data cm_x300_nand_info = { 424static struct pxa3xx_nand_platform_data cm_x300_nand_info = {
425 .enable_arbiter = 1, 425 .enable_arbiter = 1,
426 .keep_config = 1, 426 .keep_config = 1,
427 .parts = cm_x300_nand_partitions, 427 .num_cs = 1,
428 .nr_parts = ARRAY_SIZE(cm_x300_nand_partitions), 428 .parts[0] = cm_x300_nand_partitions,
429 .nr_parts[0] = ARRAY_SIZE(cm_x300_nand_partitions),
429}; 430};
430 431
431static void __init cm_x300_init_nand(void) 432static void __init cm_x300_init_nand(void)
diff --git a/arch/arm/mach-pxa/colibri-pxa3xx.c b/arch/arm/mach-pxa/colibri-pxa3xx.c
index 3f9be419959d..2b8ca0de8a3d 100644
--- a/arch/arm/mach-pxa/colibri-pxa3xx.c
+++ b/arch/arm/mach-pxa/colibri-pxa3xx.c
@@ -139,8 +139,9 @@ static struct mtd_partition colibri_nand_partitions[] = {
139static struct pxa3xx_nand_platform_data colibri_nand_info = { 139static struct pxa3xx_nand_platform_data colibri_nand_info = {
140 .enable_arbiter = 1, 140 .enable_arbiter = 1,
141 .keep_config = 1, 141 .keep_config = 1,
142 .parts = colibri_nand_partitions, 142 .num_cs = 1,
143 .nr_parts = ARRAY_SIZE(colibri_nand_partitions), 143 .parts[0] = colibri_nand_partitions,
144 .nr_parts[0] = ARRAY_SIZE(colibri_nand_partitions),
144}; 145};
145 146
146void __init colibri_pxa3xx_init_nand(void) 147void __init colibri_pxa3xx_init_nand(void)
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 27147f6ff7cb..d21e28b46d81 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -325,8 +325,9 @@ static struct mtd_partition littleton_nand_partitions[] = {
325 325
326static struct pxa3xx_nand_platform_data littleton_nand_info = { 326static struct pxa3xx_nand_platform_data littleton_nand_info = {
327 .enable_arbiter = 1, 327 .enable_arbiter = 1,
328 .parts = littleton_nand_partitions, 328 .num_cs = 1,
329 .nr_parts = ARRAY_SIZE(littleton_nand_partitions), 329 .parts[0] = littleton_nand_partitions,
330 .nr_parts[0] = ARRAY_SIZE(littleton_nand_partitions),
330}; 331};
331 332
332static void __init littleton_init_nand(void) 333static void __init littleton_init_nand(void)
diff --git a/arch/arm/mach-pxa/mxm8x10.c b/arch/arm/mach-pxa/mxm8x10.c
index a13a1e365851..83570a79e7d2 100644
--- a/arch/arm/mach-pxa/mxm8x10.c
+++ b/arch/arm/mach-pxa/mxm8x10.c
@@ -389,10 +389,11 @@ static struct mtd_partition mxm_8x10_nand_partitions[] = {
389}; 389};
390 390
391static struct pxa3xx_nand_platform_data mxm_8x10_nand_info = { 391static struct pxa3xx_nand_platform_data mxm_8x10_nand_info = {
392 .enable_arbiter = 1, 392 .enable_arbiter = 1,
393 .keep_config = 1, 393 .keep_config = 1,
394 .parts = mxm_8x10_nand_partitions, 394 .num_cs = 1,
395 .nr_parts = ARRAY_SIZE(mxm_8x10_nand_partitions) 395 .parts[0] = mxm_8x10_nand_partitions,
396 .nr_parts[0] = ARRAY_SIZE(mxm_8x10_nand_partitions)
396}; 397};
397 398
398static void __init mxm_8x10_nand_init(void) 399static void __init mxm_8x10_nand_init(void)
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 9c58e87f2b82..78d643783f99 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -346,8 +346,9 @@ static struct mtd_partition raumfeld_nand_partitions[] = {
346static struct pxa3xx_nand_platform_data raumfeld_nand_info = { 346static struct pxa3xx_nand_platform_data raumfeld_nand_info = {
347 .enable_arbiter = 1, 347 .enable_arbiter = 1,
348 .keep_config = 1, 348 .keep_config = 1,
349 .parts = raumfeld_nand_partitions, 349 .num_cs = 1,
350 .nr_parts = ARRAY_SIZE(raumfeld_nand_partitions), 350 .parts[0] = raumfeld_nand_partitions,
351 .nr_parts[0] = ARRAY_SIZE(raumfeld_nand_partitions),
351}; 352};
352 353
353/** 354/**
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index 2406fd2727ef..a4c807527095 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -366,8 +366,9 @@ static struct mtd_partition zylonite_nand_partitions[] = {
366 366
367static struct pxa3xx_nand_platform_data zylonite_nand_info = { 367static struct pxa3xx_nand_platform_data zylonite_nand_info = {
368 .enable_arbiter = 1, 368 .enable_arbiter = 1,
369 .parts = zylonite_nand_partitions, 369 .num_cs = 1,
370 .nr_parts = ARRAY_SIZE(zylonite_nand_partitions), 370 .parts[0] = zylonite_nand_partitions,
371 .nr_parts[0] = ARRAY_SIZE(zylonite_nand_partitions),
371}; 372};
372 373
373static void __init zylonite_init_nand(void) 374static void __init zylonite_init_nand(void)
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 2aec2f732515..737bdc631b0d 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5# Common objects 5# Common objects
6obj-y := timer.o console.o clock.o pm_runtime.o 6obj-y := timer.o console.o clock.o
7 7
8# CPU objects 8# CPU objects
9obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o 9obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 83624e26b884..b862e9f81e3e 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -515,14 +515,14 @@ static void __init ag5evm_init(void)
515 /* enable MMCIF */ 515 /* enable MMCIF */
516 gpio_request(GPIO_FN_MMCCLK0, NULL); 516 gpio_request(GPIO_FN_MMCCLK0, NULL);
517 gpio_request(GPIO_FN_MMCCMD0_PU, NULL); 517 gpio_request(GPIO_FN_MMCCMD0_PU, NULL);
518 gpio_request(GPIO_FN_MMCD0_0, NULL); 518 gpio_request(GPIO_FN_MMCD0_0_PU, NULL);
519 gpio_request(GPIO_FN_MMCD0_1, NULL); 519 gpio_request(GPIO_FN_MMCD0_1_PU, NULL);
520 gpio_request(GPIO_FN_MMCD0_2, NULL); 520 gpio_request(GPIO_FN_MMCD0_2_PU, NULL);
521 gpio_request(GPIO_FN_MMCD0_3, NULL); 521 gpio_request(GPIO_FN_MMCD0_3_PU, NULL);
522 gpio_request(GPIO_FN_MMCD0_4, NULL); 522 gpio_request(GPIO_FN_MMCD0_4_PU, NULL);
523 gpio_request(GPIO_FN_MMCD0_5, NULL); 523 gpio_request(GPIO_FN_MMCD0_5_PU, NULL);
524 gpio_request(GPIO_FN_MMCD0_6, NULL); 524 gpio_request(GPIO_FN_MMCD0_6_PU, NULL);
525 gpio_request(GPIO_FN_MMCD0_7, NULL); 525 gpio_request(GPIO_FN_MMCD0_7_PU, NULL);
526 gpio_request(GPIO_PORT208, NULL); /* Reset */ 526 gpio_request(GPIO_PORT208, NULL); /* Reset */
527 gpio_direction_output(GPIO_PORT208, 1); 527 gpio_direction_output(GPIO_PORT208, 1);
528 528
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index a3aa0f6df964..4c865ece9ac4 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -201,7 +201,7 @@ static struct physmap_flash_data nor_flash_data = {
201static struct resource nor_flash_resources[] = { 201static struct resource nor_flash_resources[] = {
202 [0] = { 202 [0] = {
203 .start = 0x20000000, /* CS0 shadow instead of regular CS0 */ 203 .start = 0x20000000, /* CS0 shadow instead of regular CS0 */
204 .end = 0x28000000 - 1, /* needed by USB MASK ROM boot */ 204 .end = 0x28000000 - 1, /* needed by USB MASK ROM boot */
205 .flags = IORESOURCE_MEM, 205 .flags = IORESOURCE_MEM,
206 } 206 }
207}; 207};
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c
index adc73122bf20..bd9a78424d6b 100644
--- a/arch/arm/mach-shmobile/board-kota2.c
+++ b/arch/arm/mach-shmobile/board-kota2.c
@@ -48,6 +48,7 @@
48#include <asm/hardware/cache-l2x0.h> 48#include <asm/hardware/cache-l2x0.h>
49#include <asm/traps.h> 49#include <asm/traps.h>
50 50
51/* SMSC 9220 */
51static struct resource smsc9220_resources[] = { 52static struct resource smsc9220_resources[] = {
52 [0] = { 53 [0] = {
53 .start = 0x14000000, /* CS5A */ 54 .start = 0x14000000, /* CS5A */
@@ -77,6 +78,7 @@ static struct platform_device eth_device = {
77 .num_resources = ARRAY_SIZE(smsc9220_resources), 78 .num_resources = ARRAY_SIZE(smsc9220_resources),
78}; 79};
79 80
81/* KEYSC */
80static struct sh_keysc_info keysc_platdata = { 82static struct sh_keysc_info keysc_platdata = {
81 .mode = SH_KEYSC_MODE_6, 83 .mode = SH_KEYSC_MODE_6,
82 .scan_timing = 3, 84 .scan_timing = 3,
@@ -120,6 +122,7 @@ static struct platform_device keysc_device = {
120 }, 122 },
121}; 123};
122 124
125/* GPIO KEY */
123#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 } 126#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
124 127
125static struct gpio_keys_button gpio_buttons[] = { 128static struct gpio_keys_button gpio_buttons[] = {
@@ -150,6 +153,7 @@ static struct platform_device gpio_keys_device = {
150 }, 153 },
151}; 154};
152 155
156/* GPIO LED */
153#define GPIO_LED(n, g) { .name = n, .gpio = g } 157#define GPIO_LED(n, g) { .name = n, .gpio = g }
154 158
155static struct gpio_led gpio_leds[] = { 159static struct gpio_led gpio_leds[] = {
@@ -175,6 +179,7 @@ static struct platform_device gpio_leds_device = {
175 }, 179 },
176}; 180};
177 181
182/* MMCIF */
178static struct resource mmcif_resources[] = { 183static struct resource mmcif_resources[] = {
179 [0] = { 184 [0] = {
180 .name = "MMCIF", 185 .name = "MMCIF",
@@ -207,6 +212,7 @@ static struct platform_device mmcif_device = {
207 .resource = mmcif_resources, 212 .resource = mmcif_resources,
208}; 213};
209 214
215/* SDHI0 */
210static struct sh_mobile_sdhi_info sdhi0_info = { 216static struct sh_mobile_sdhi_info sdhi0_info = {
211 .tmio_caps = MMC_CAP_SD_HIGHSPEED, 217 .tmio_caps = MMC_CAP_SD_HIGHSPEED,
212 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT, 218 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
@@ -243,6 +249,7 @@ static struct platform_device sdhi0_device = {
243 }, 249 },
244}; 250};
245 251
252/* SDHI1 */
246static struct sh_mobile_sdhi_info sdhi1_info = { 253static struct sh_mobile_sdhi_info sdhi1_info = {
247 .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ, 254 .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
248 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT, 255 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 66975921e646..995a9c3aec8f 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -476,7 +476,7 @@ static struct clk_ops fsidiv_clk_ops = {
476 .disable = fsidiv_disable, 476 .disable = fsidiv_disable,
477}; 477};
478 478
479static struct clk_mapping sh7372_fsidiva_clk_mapping = { 479static struct clk_mapping fsidiva_clk_mapping = {
480 .phys = FSIDIVA, 480 .phys = FSIDIVA,
481 .len = 8, 481 .len = 8,
482}; 482};
@@ -484,10 +484,10 @@ static struct clk_mapping sh7372_fsidiva_clk_mapping = {
484struct clk sh7372_fsidiva_clk = { 484struct clk sh7372_fsidiva_clk = {
485 .ops = &fsidiv_clk_ops, 485 .ops = &fsidiv_clk_ops,
486 .parent = &div6_reparent_clks[DIV6_FSIA], /* late install */ 486 .parent = &div6_reparent_clks[DIV6_FSIA], /* late install */
487 .mapping = &sh7372_fsidiva_clk_mapping, 487 .mapping = &fsidiva_clk_mapping,
488}; 488};
489 489
490static struct clk_mapping sh7372_fsidivb_clk_mapping = { 490static struct clk_mapping fsidivb_clk_mapping = {
491 .phys = FSIDIVB, 491 .phys = FSIDIVB,
492 .len = 8, 492 .len = 8,
493}; 493};
@@ -495,7 +495,7 @@ static struct clk_mapping sh7372_fsidivb_clk_mapping = {
495struct clk sh7372_fsidivb_clk = { 495struct clk sh7372_fsidivb_clk = {
496 .ops = &fsidiv_clk_ops, 496 .ops = &fsidiv_clk_ops,
497 .parent = &div6_reparent_clks[DIV6_FSIB], /* late install */ 497 .parent = &div6_reparent_clks[DIV6_FSIB], /* late install */
498 .mapping = &sh7372_fsidivb_clk_mapping, 498 .mapping = &fsidivb_clk_mapping,
499}; 499};
500 500
501static struct clk *late_main_clks[] = { 501static struct clk *late_main_clks[] = {
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
index 2e44f11f592e..1b2334277e85 100644
--- a/arch/arm/mach-shmobile/cpuidle.c
+++ b/arch/arm/mach-shmobile/cpuidle.c
@@ -26,65 +26,59 @@ void (*shmobile_cpuidle_modes[CPUIDLE_STATE_MAX])(void) = {
26}; 26};
27 27
28static int shmobile_cpuidle_enter(struct cpuidle_device *dev, 28static int shmobile_cpuidle_enter(struct cpuidle_device *dev,
29 struct cpuidle_state *state) 29 struct cpuidle_driver *drv,
30 int index)
30{ 31{
31 ktime_t before, after; 32 ktime_t before, after;
32 int requested_state = state - &dev->states[0];
33 33
34 dev->last_state = &dev->states[requested_state];
35 before = ktime_get(); 34 before = ktime_get();
36 35
37 local_irq_disable(); 36 local_irq_disable();
38 local_fiq_disable(); 37 local_fiq_disable();
39 38
40 shmobile_cpuidle_modes[requested_state](); 39 shmobile_cpuidle_modes[index]();
41 40
42 local_irq_enable(); 41 local_irq_enable();
43 local_fiq_enable(); 42 local_fiq_enable();
44 43
45 after = ktime_get(); 44 after = ktime_get();
46 return ktime_to_ns(ktime_sub(after, before)) >> 10; 45 dev->last_residency = ktime_to_ns(ktime_sub(after, before)) >> 10;
46
47 return index;
47} 48}
48 49
49static struct cpuidle_device shmobile_cpuidle_dev; 50static struct cpuidle_device shmobile_cpuidle_dev;
50static struct cpuidle_driver shmobile_cpuidle_driver = { 51static struct cpuidle_driver shmobile_cpuidle_driver = {
51 .name = "shmobile_cpuidle", 52 .name = "shmobile_cpuidle",
52 .owner = THIS_MODULE, 53 .owner = THIS_MODULE,
54 .states[0] = {
55 .name = "C1",
56 .desc = "WFI",
57 .exit_latency = 1,
58 .target_residency = 1 * 2,
59 .flags = CPUIDLE_FLAG_TIME_VALID,
60 },
61 .safe_state_index = 0, /* C1 */
62 .state_count = 1,
53}; 63};
54 64
55void (*shmobile_cpuidle_setup)(struct cpuidle_device *dev); 65void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
56 66
57static int shmobile_cpuidle_init(void) 67static int shmobile_cpuidle_init(void)
58{ 68{
59 struct cpuidle_device *dev = &shmobile_cpuidle_dev; 69 struct cpuidle_device *dev = &shmobile_cpuidle_dev;
60 struct cpuidle_state *state; 70 struct cpuidle_driver *drv = &shmobile_cpuidle_driver;
61 int i; 71 int i;
62 72
63 cpuidle_register_driver(&shmobile_cpuidle_driver); 73 for (i = 0; i < CPUIDLE_STATE_MAX; i++)
64 74 drv->states[i].enter = shmobile_cpuidle_enter;
65 for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
66 dev->states[i].name[0] = '\0';
67 dev->states[i].desc[0] = '\0';
68 dev->states[i].enter = shmobile_cpuidle_enter;
69 }
70
71 i = CPUIDLE_DRIVER_STATE_START;
72
73 state = &dev->states[i++];
74 snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
75 strncpy(state->desc, "WFI", CPUIDLE_DESC_LEN);
76 state->exit_latency = 1;
77 state->target_residency = 1 * 2;
78 state->power_usage = 3;
79 state->flags = 0;
80 state->flags |= CPUIDLE_FLAG_TIME_VALID;
81
82 dev->safe_state = state;
83 dev->state_count = i;
84 75
85 if (shmobile_cpuidle_setup) 76 if (shmobile_cpuidle_setup)
86 shmobile_cpuidle_setup(dev); 77 shmobile_cpuidle_setup(drv);
78
79 cpuidle_register_driver(drv);
87 80
81 dev->state_count = drv->state_count;
88 cpuidle_register_device(dev); 82 cpuidle_register_device(dev);
89 83
90 return 0; 84 return 0;
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index c0cdbf997c91..834bd6cd508f 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -9,9 +9,9 @@ extern int clk_init(void);
9extern void shmobile_handle_irq_intc(struct pt_regs *); 9extern void shmobile_handle_irq_intc(struct pt_regs *);
10extern void shmobile_handle_irq_gic(struct pt_regs *); 10extern void shmobile_handle_irq_gic(struct pt_regs *);
11extern struct platform_suspend_ops shmobile_suspend_ops; 11extern struct platform_suspend_ops shmobile_suspend_ops;
12struct cpuidle_device; 12struct cpuidle_driver;
13extern void (*shmobile_cpuidle_modes[])(void); 13extern void (*shmobile_cpuidle_modes[])(void);
14extern void (*shmobile_cpuidle_setup)(struct cpuidle_device *dev); 14extern void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
15 15
16extern void sh7367_init_irq(void); 16extern void sh7367_init_irq(void);
17extern void sh7367_add_early_devices(void); 17extern void sh7367_add_early_devices(void);
diff --git a/arch/arm/mach-shmobile/include/mach/sh73a0.h b/arch/arm/mach-shmobile/include/mach/sh73a0.h
index 18ae6a990bc2..881d515a9686 100644
--- a/arch/arm/mach-shmobile/include/mach/sh73a0.h
+++ b/arch/arm/mach-shmobile/include/mach/sh73a0.h
@@ -470,6 +470,14 @@ enum {
470 GPIO_FN_SDHICMD2_PU, 470 GPIO_FN_SDHICMD2_PU,
471 GPIO_FN_MMCCMD0_PU, 471 GPIO_FN_MMCCMD0_PU,
472 GPIO_FN_MMCCMD1_PU, 472 GPIO_FN_MMCCMD1_PU,
473 GPIO_FN_MMCD0_0_PU,
474 GPIO_FN_MMCD0_1_PU,
475 GPIO_FN_MMCD0_2_PU,
476 GPIO_FN_MMCD0_3_PU,
477 GPIO_FN_MMCD0_4_PU,
478 GPIO_FN_MMCD0_5_PU,
479 GPIO_FN_MMCD0_6_PU,
480 GPIO_FN_MMCD0_7_PU,
473 GPIO_FN_FSIACK_PU, 481 GPIO_FN_FSIACK_PU,
474 GPIO_FN_FSIAILR_PU, 482 GPIO_FN_FSIAILR_PU,
475 GPIO_FN_FSIAIBT_PU, 483 GPIO_FN_FSIAIBT_PU,
diff --git a/arch/arm/mach-shmobile/pfc-sh7367.c b/arch/arm/mach-shmobile/pfc-sh7367.c
index 128555e76e43..e6e524654e67 100644
--- a/arch/arm/mach-shmobile/pfc-sh7367.c
+++ b/arch/arm/mach-shmobile/pfc-sh7367.c
@@ -21,68 +21,49 @@
21#include <linux/gpio.h> 21#include <linux/gpio.h>
22#include <mach/sh7367.h> 22#include <mach/sh7367.h>
23 23
24#define _1(fn, pfx, sfx) fn(pfx, sfx) 24#define CPU_ALL_PORT(fn, pfx, sfx) \
25 25 PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
26#define _10(fn, pfx, sfx) \ 26 PORT_10(fn, pfx##10, sfx), PORT_90(fn, pfx##1, sfx), \
27 _1(fn, pfx##0, sfx), _1(fn, pfx##1, sfx), \ 27 PORT_10(fn, pfx##20, sfx), PORT_10(fn, pfx##21, sfx), \
28 _1(fn, pfx##2, sfx), _1(fn, pfx##3, sfx), \ 28 PORT_10(fn, pfx##22, sfx), PORT_10(fn, pfx##23, sfx), \
29 _1(fn, pfx##4, sfx), _1(fn, pfx##5, sfx), \ 29 PORT_10(fn, pfx##24, sfx), PORT_10(fn, pfx##25, sfx), \
30 _1(fn, pfx##6, sfx), _1(fn, pfx##7, sfx), \ 30 PORT_10(fn, pfx##26, sfx), PORT_1(fn, pfx##270, sfx), \
31 _1(fn, pfx##8, sfx), _1(fn, pfx##9, sfx) 31 PORT_1(fn, pfx##271, sfx), PORT_1(fn, pfx##272, sfx)
32
33#define _90(fn, pfx, sfx) \
34 _10(fn, pfx##1, sfx), _10(fn, pfx##2, sfx), \
35 _10(fn, pfx##3, sfx), _10(fn, pfx##4, sfx), \
36 _10(fn, pfx##5, sfx), _10(fn, pfx##6, sfx), \
37 _10(fn, pfx##7, sfx), _10(fn, pfx##8, sfx), \
38 _10(fn, pfx##9, sfx)
39
40#define _273(fn, pfx, sfx) \
41 _10(fn, pfx, sfx), _90(fn, pfx, sfx), \
42 _10(fn, pfx##10, sfx), _90(fn, pfx##1, sfx), \
43 _10(fn, pfx##20, sfx), _10(fn, pfx##21, sfx), \
44 _10(fn, pfx##22, sfx), _10(fn, pfx##23, sfx), \
45 _10(fn, pfx##24, sfx), _10(fn, pfx##25, sfx), \
46 _10(fn, pfx##26, sfx), _1(fn, pfx##270, sfx), \
47 _1(fn, pfx##271, sfx), _1(fn, pfx##272, sfx)
48
49#define _PORT(pfx, sfx) pfx##_##sfx
50#define PORT_273(str) _273(_PORT, PORT, str)
51 32
52enum { 33enum {
53 PINMUX_RESERVED = 0, 34 PINMUX_RESERVED = 0,
54 35
55 PINMUX_DATA_BEGIN, 36 PINMUX_DATA_BEGIN,
56 PORT_273(DATA), /* PORT0_DATA -> PORT272_DATA */ 37 PORT_ALL(DATA), /* PORT0_DATA -> PORT272_DATA */
57 PINMUX_DATA_END, 38 PINMUX_DATA_END,
58 39
59 PINMUX_INPUT_BEGIN, 40 PINMUX_INPUT_BEGIN,
60 PORT_273(IN), /* PORT0_IN -> PORT272_IN */ 41 PORT_ALL(IN), /* PORT0_IN -> PORT272_IN */
61 PINMUX_INPUT_END, 42 PINMUX_INPUT_END,
62 43
63 PINMUX_INPUT_PULLUP_BEGIN, 44 PINMUX_INPUT_PULLUP_BEGIN,
64 PORT_273(IN_PU), /* PORT0_IN_PU -> PORT272_IN_PU */ 45 PORT_ALL(IN_PU), /* PORT0_IN_PU -> PORT272_IN_PU */
65 PINMUX_INPUT_PULLUP_END, 46 PINMUX_INPUT_PULLUP_END,
66 47
67 PINMUX_INPUT_PULLDOWN_BEGIN, 48 PINMUX_INPUT_PULLDOWN_BEGIN,
68 PORT_273(IN_PD), /* PORT0_IN_PD -> PORT272_IN_PD */ 49 PORT_ALL(IN_PD), /* PORT0_IN_PD -> PORT272_IN_PD */
69 PINMUX_INPUT_PULLDOWN_END, 50 PINMUX_INPUT_PULLDOWN_END,
70 51
71 PINMUX_OUTPUT_BEGIN, 52 PINMUX_OUTPUT_BEGIN,
72 PORT_273(OUT), /* PORT0_OUT -> PORT272_OUT */ 53 PORT_ALL(OUT), /* PORT0_OUT -> PORT272_OUT */
73 PINMUX_OUTPUT_END, 54 PINMUX_OUTPUT_END,
74 55
75 PINMUX_FUNCTION_BEGIN, 56 PINMUX_FUNCTION_BEGIN,
76 PORT_273(FN_IN), /* PORT0_FN_IN -> PORT272_FN_IN */ 57 PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT272_FN_IN */
77 PORT_273(FN_OUT), /* PORT0_FN_OUT -> PORT272_FN_OUT */ 58 PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT272_FN_OUT */
78 PORT_273(FN0), /* PORT0_FN0 -> PORT272_FN0 */ 59 PORT_ALL(FN0), /* PORT0_FN0 -> PORT272_FN0 */
79 PORT_273(FN1), /* PORT0_FN1 -> PORT272_FN1 */ 60 PORT_ALL(FN1), /* PORT0_FN1 -> PORT272_FN1 */
80 PORT_273(FN2), /* PORT0_FN2 -> PORT272_FN2 */ 61 PORT_ALL(FN2), /* PORT0_FN2 -> PORT272_FN2 */
81 PORT_273(FN3), /* PORT0_FN3 -> PORT272_FN3 */ 62 PORT_ALL(FN3), /* PORT0_FN3 -> PORT272_FN3 */
82 PORT_273(FN4), /* PORT0_FN4 -> PORT272_FN4 */ 63 PORT_ALL(FN4), /* PORT0_FN4 -> PORT272_FN4 */
83 PORT_273(FN5), /* PORT0_FN5 -> PORT272_FN5 */ 64 PORT_ALL(FN5), /* PORT0_FN5 -> PORT272_FN5 */
84 PORT_273(FN6), /* PORT0_FN6 -> PORT272_FN6 */ 65 PORT_ALL(FN6), /* PORT0_FN6 -> PORT272_FN6 */
85 PORT_273(FN7), /* PORT0_FN7 -> PORT272_FN7 */ 66 PORT_ALL(FN7), /* PORT0_FN7 -> PORT272_FN7 */
86 67
87 MSELBCR_MSEL2_1, MSELBCR_MSEL2_0, 68 MSELBCR_MSEL2_1, MSELBCR_MSEL2_0,
88 PINMUX_FUNCTION_END, 69 PINMUX_FUNCTION_END,
@@ -327,41 +308,6 @@ enum {
327 PINMUX_MARK_END, 308 PINMUX_MARK_END,
328}; 309};
329 310
330#define PORT_DATA_I(nr) \
331 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN)
332
333#define PORT_DATA_I_PD(nr) \
334 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
335 PORT##nr##_IN, PORT##nr##_IN_PD)
336
337#define PORT_DATA_I_PU(nr) \
338 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
339 PORT##nr##_IN, PORT##nr##_IN_PU)
340
341#define PORT_DATA_I_PU_PD(nr) \
342 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
343 PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
344
345#define PORT_DATA_O(nr) \
346 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT)
347
348#define PORT_DATA_IO(nr) \
349 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
350 PORT##nr##_IN)
351
352#define PORT_DATA_IO_PD(nr) \
353 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
354 PORT##nr##_IN, PORT##nr##_IN_PD)
355
356#define PORT_DATA_IO_PU(nr) \
357 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
358 PORT##nr##_IN, PORT##nr##_IN_PU)
359
360#define PORT_DATA_IO_PU_PD(nr) \
361 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
362 PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
363
364
365static pinmux_enum_t pinmux_data[] = { 311static pinmux_enum_t pinmux_data[] = {
366 312
367 /* specify valid pin states for each pin in GPIO mode */ 313 /* specify valid pin states for each pin in GPIO mode */
@@ -1098,13 +1044,9 @@ static pinmux_enum_t pinmux_data[] = {
1098 PINMUX_DATA(DIVLOCK_MARK, PORT272_FN1), 1044 PINMUX_DATA(DIVLOCK_MARK, PORT272_FN1),
1099}; 1045};
1100 1046
1101#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
1102#define GPIO_PORT_273() _273(_GPIO_PORT, , unused)
1103#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
1104
1105static struct pinmux_gpio pinmux_gpios[] = { 1047static struct pinmux_gpio pinmux_gpios[] = {
1106 /* 49-1 -> 49-6 (GPIO) */ 1048 /* 49-1 -> 49-6 (GPIO) */
1107 GPIO_PORT_273(), 1049 GPIO_PORT_ALL(),
1108 1050
1109 /* Special Pull-up / Pull-down Functions */ 1051 /* Special Pull-up / Pull-down Functions */
1110 GPIO_FN(PORT48_KEYIN0_PU), GPIO_FN(PORT49_KEYIN1_PU), 1052 GPIO_FN(PORT48_KEYIN0_PU), GPIO_FN(PORT49_KEYIN1_PU),
@@ -1345,22 +1287,6 @@ static struct pinmux_gpio pinmux_gpios[] = {
1345 GPIO_FN(DIVLOCK), 1287 GPIO_FN(DIVLOCK),
1346}; 1288};
1347 1289
1348/* helper for top 4 bits in PORTnCR */
1349#define PCRH(in, in_pd, in_pu, out) \
1350 0, (out), (in), 0, \
1351 0, 0, 0, 0, \
1352 0, 0, (in_pd), 0, \
1353 0, 0, (in_pu), 0
1354
1355#define PORTCR(nr, reg) \
1356 { PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
1357 PCRH(PORT##nr##_IN, PORT##nr##_IN_PD, \
1358 PORT##nr##_IN_PU, PORT##nr##_OUT), \
1359 PORT##nr##_FN0, PORT##nr##_FN1, PORT##nr##_FN2, \
1360 PORT##nr##_FN3, PORT##nr##_FN4, PORT##nr##_FN5, \
1361 PORT##nr##_FN6, PORT##nr##_FN7 } \
1362 }
1363
1364static struct pinmux_cfg_reg pinmux_config_regs[] = { 1290static struct pinmux_cfg_reg pinmux_config_regs[] = {
1365 PORTCR(0, 0xe6050000), /* PORT0CR */ 1291 PORTCR(0, 0xe6050000), /* PORT0CR */
1366 PORTCR(1, 0xe6050001), /* PORT1CR */ 1292 PORTCR(1, 0xe6050001), /* PORT1CR */
diff --git a/arch/arm/mach-shmobile/pfc-sh7372.c b/arch/arm/mach-shmobile/pfc-sh7372.c
index 9c265dae138a..1bd6585a6acf 100644
--- a/arch/arm/mach-shmobile/pfc-sh7372.c
+++ b/arch/arm/mach-shmobile/pfc-sh7372.c
@@ -25,27 +25,13 @@
25#include <linux/gpio.h> 25#include <linux/gpio.h>
26#include <mach/sh7372.h> 26#include <mach/sh7372.h>
27 27
28#define _1(fn, pfx, sfx) fn(pfx, sfx) 28#define CPU_ALL_PORT(fn, pfx, sfx) \
29 29 PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
30#define _10(fn, pfx, sfx) \ 30 PORT_10(fn, pfx##10, sfx), PORT_10(fn, pfx##11, sfx), \
31 _1(fn, pfx##0, sfx), _1(fn, pfx##1, sfx), \ 31 PORT_10(fn, pfx##12, sfx), PORT_10(fn, pfx##13, sfx), \
32 _1(fn, pfx##2, sfx), _1(fn, pfx##3, sfx), \ 32 PORT_10(fn, pfx##14, sfx), PORT_10(fn, pfx##15, sfx), \
33 _1(fn, pfx##4, sfx), _1(fn, pfx##5, sfx), \ 33 PORT_10(fn, pfx##16, sfx), PORT_10(fn, pfx##17, sfx), \
34 _1(fn, pfx##6, sfx), _1(fn, pfx##7, sfx), \ 34 PORT_10(fn, pfx##18, sfx), PORT_1(fn, pfx##190, sfx)
35 _1(fn, pfx##8, sfx), _1(fn, pfx##9, sfx)
36
37#define _80(fn, pfx, sfx) \
38 _10(fn, pfx##1, sfx), _10(fn, pfx##2, sfx), \
39 _10(fn, pfx##3, sfx), _10(fn, pfx##4, sfx), \
40 _10(fn, pfx##5, sfx), _10(fn, pfx##6, sfx), \
41 _10(fn, pfx##7, sfx), _10(fn, pfx##8, sfx)
42
43#define _190(fn, pfx, sfx) \
44 _10(fn, pfx, sfx), _80(fn, pfx, sfx), _10(fn, pfx##9, sfx), \
45 _10(fn, pfx##10, sfx), _80(fn, pfx##1, sfx), _1(fn, pfx##190, sfx)
46
47#define _PORT(pfx, sfx) pfx##_##sfx
48#define PORT_ALL(str) _190(_PORT, PORT, str)
49 35
50enum { 36enum {
51 PINMUX_RESERVED = 0, 37 PINMUX_RESERVED = 0,
@@ -381,108 +367,124 @@ enum {
381 PINMUX_MARK_END, 367 PINMUX_MARK_END,
382}; 368};
383 369
384/* PORT_DATA_I_PD(nr) */
385#define _I___D(nr) \
386 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
387 PORT##nr##_IN, PORT##nr##_IN_PD)
388
389/* PORT_DATA_I_PU(nr) */
390#define _I__U_(nr) \
391 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
392 PORT##nr##_IN, PORT##nr##_IN_PU)
393
394/* PORT_DATA_I_PU_PD(nr) */
395#define _I__UD(nr) \
396 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
397 PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
398
399/* PORT_DATA_O(nr) */
400#define __O___(nr) \
401 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT)
402
403/* PORT_DATA_IO(nr) */
404#define _IO___(nr) \
405 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
406 PORT##nr##_IN)
407
408/* PORT_DATA_IO_PD(nr) */
409#define _IO__D(nr) \
410 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
411 PORT##nr##_IN, PORT##nr##_IN_PD)
412
413/* PORT_DATA_IO_PU(nr) */
414#define _IO_U_(nr) \
415 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
416 PORT##nr##_IN, PORT##nr##_IN_PU)
417
418/* PORT_DATA_IO_PU_PD(nr) */
419#define _IO_UD(nr) \
420 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
421 PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
422
423
424static pinmux_enum_t pinmux_data[] = { 370static pinmux_enum_t pinmux_data[] = {
425 371
426 /* specify valid pin states for each pin in GPIO mode */ 372 /* specify valid pin states for each pin in GPIO mode */
427 373 PORT_DATA_IO_PD(0), PORT_DATA_IO_PD(1),
428 _IO__D(0), _IO__D(1), __O___(2), _I___D(3), _I___D(4), 374 PORT_DATA_O(2), PORT_DATA_I_PD(3),
429 _I___D(5), _IO_UD(6), _I___D(7), _IO__D(8), __O___(9), 375 PORT_DATA_I_PD(4), PORT_DATA_I_PD(5),
430 376 PORT_DATA_IO_PU_PD(6), PORT_DATA_I_PD(7),
431 __O___(10), __O___(11), _IO_UD(12), _IO__D(13), _IO__D(14), 377 PORT_DATA_IO_PD(8), PORT_DATA_O(9),
432 __O___(15), _IO__D(16), _IO__D(17), _I___D(18), _IO___(19), 378
433 379 PORT_DATA_O(10), PORT_DATA_O(11),
434 _IO___(20), _IO___(21), _IO___(22), _IO___(23), _IO___(24), 380 PORT_DATA_IO_PU_PD(12), PORT_DATA_IO_PD(13),
435 _IO___(25), _IO___(26), _IO___(27), _IO___(28), _IO___(29), 381 PORT_DATA_IO_PD(14), PORT_DATA_O(15),
436 382 PORT_DATA_IO_PD(16), PORT_DATA_IO_PD(17),
437 _IO___(30), _IO___(31), _IO___(32), _IO___(33), _IO___(34), 383 PORT_DATA_I_PD(18), PORT_DATA_IO(19),
438 _IO___(35), _IO___(36), _IO___(37), _IO___(38), _IO___(39), 384
439 385 PORT_DATA_IO(20), PORT_DATA_IO(21),
440 _IO___(40), _IO___(41), _IO___(42), _IO___(43), _IO___(44), 386 PORT_DATA_IO(22), PORT_DATA_IO(23),
441 _IO___(45), _IO_U_(46), _IO_U_(47), _IO_U_(48), _IO_U_(49), 387 PORT_DATA_IO(24), PORT_DATA_IO(25),
442 388 PORT_DATA_IO(26), PORT_DATA_IO(27),
443 _IO_U_(50), _IO_U_(51), _IO_U_(52), _IO_U_(53), _IO_U_(54), 389 PORT_DATA_IO(28), PORT_DATA_IO(29),
444 _IO_U_(55), _IO_U_(56), _IO_U_(57), _IO_U_(58), _IO_U_(59), 390
445 391 PORT_DATA_IO(30), PORT_DATA_IO(31),
446 _IO_U_(60), _IO_U_(61), _IO___(62), __O___(63), __O___(64), 392 PORT_DATA_IO(32), PORT_DATA_IO(33),
447 _IO_U_(65), __O___(66), _IO_U_(67), __O___(68), _IO___(69), /*66?*/ 393 PORT_DATA_IO(34), PORT_DATA_IO(35),
448 394 PORT_DATA_IO(36), PORT_DATA_IO(37),
449 _IO___(70), _IO___(71), __O___(72), _I__U_(73), _I__UD(74), 395 PORT_DATA_IO(38), PORT_DATA_IO(39),
450 _IO_UD(75), _IO_UD(76), _IO_UD(77), _IO_UD(78), _IO_UD(79), 396
451 397 PORT_DATA_IO(40), PORT_DATA_IO(41),
452 _IO_UD(80), _IO_UD(81), _IO_UD(82), _IO_UD(83), _IO_UD(84), 398 PORT_DATA_IO(42), PORT_DATA_IO(43),
453 _IO_UD(85), _IO_UD(86), _IO_UD(87), _IO_UD(88), _IO_UD(89), 399 PORT_DATA_IO(44), PORT_DATA_IO(45),
454 400 PORT_DATA_IO_PU(46), PORT_DATA_IO_PU(47),
455 _IO_UD(90), _IO_UD(91), _IO_UD(92), _IO_UD(93), _IO_UD(94), 401 PORT_DATA_IO_PU(48), PORT_DATA_IO_PU(49),
456 _IO_UD(95), _IO_U_(96), _IO_UD(97), _IO_UD(98), __O___(99), /*99?*/ 402
457 403 PORT_DATA_IO_PU(50), PORT_DATA_IO_PU(51),
458 _IO__D(100), _IO__D(101), _IO__D(102), _IO__D(103), _IO__D(104), 404 PORT_DATA_IO_PU(52), PORT_DATA_IO_PU(53),
459 _IO__D(105), _IO_U_(106), _IO_U_(107), _IO_U_(108), _IO_U_(109), 405 PORT_DATA_IO_PU(54), PORT_DATA_IO_PU(55),
460 406 PORT_DATA_IO_PU(56), PORT_DATA_IO_PU(57),
461 _IO_U_(110), _IO_U_(111), _IO__D(112), _IO__D(113), _IO_U_(114), 407 PORT_DATA_IO_PU(58), PORT_DATA_IO_PU(59),
462 _IO_U_(115), _IO_U_(116), _IO_U_(117), _IO_U_(118), _IO_U_(119), 408
463 409 PORT_DATA_IO_PU(60), PORT_DATA_IO_PU(61),
464 _IO_U_(120), _IO__D(121), _IO__D(122), _IO__D(123), _IO__D(124), 410 PORT_DATA_IO(62), PORT_DATA_O(63),
465 _IO__D(125), _IO__D(126), _IO__D(127), _IO__D(128), _IO_UD(129), 411 PORT_DATA_O(64), PORT_DATA_IO_PU(65),
466 412 PORT_DATA_O(66), PORT_DATA_IO_PU(67), /*66?*/
467 _IO_UD(130), _IO_UD(131), _IO_UD(132), _IO_UD(133), _IO_UD(134), 413 PORT_DATA_O(68), PORT_DATA_IO(69),
468 _IO_UD(135), _IO__D(136), _IO__D(137), _IO__D(138), _IO__D(139), 414
469 415 PORT_DATA_IO(70), PORT_DATA_IO(71),
470 _IO__D(140), _IO__D(141), _IO__D(142), _IO_UD(143), _IO__D(144), 416 PORT_DATA_O(72), PORT_DATA_I_PU(73),
471 _IO__D(145), _IO__D(146), _IO__D(147), _IO__D(148), _IO__D(149), 417 PORT_DATA_I_PU_PD(74), PORT_DATA_IO_PU_PD(75),
472 418 PORT_DATA_IO_PU_PD(76), PORT_DATA_IO_PU_PD(77),
473 _IO__D(150), _IO__D(151), _IO_UD(152), _I___D(153), _IO_UD(154), 419 PORT_DATA_IO_PU_PD(78), PORT_DATA_IO_PU_PD(79),
474 _I___D(155), _IO__D(156), _IO__D(157), _I___D(158), _IO__D(159), 420
475 421 PORT_DATA_IO_PU_PD(80), PORT_DATA_IO_PU_PD(81),
476 __O___(160), _IO__D(161), _IO__D(162), _IO__D(163), _I___D(164), 422 PORT_DATA_IO_PU_PD(82), PORT_DATA_IO_PU_PD(83),
477 _IO__D(165), _I___D(166), _I___D(167), _I___D(168), _I___D(169), 423 PORT_DATA_IO_PU_PD(84), PORT_DATA_IO_PU_PD(85),
478 424 PORT_DATA_IO_PU_PD(86), PORT_DATA_IO_PU_PD(87),
479 _I___D(170), __O___(171), _IO_UD(172), _IO_UD(173), _IO_UD(174), 425 PORT_DATA_IO_PU_PD(88), PORT_DATA_IO_PU_PD(89),
480 _IO_UD(175), _IO_UD(176), _IO_UD(177), _IO_UD(178), __O___(179), 426
481 427 PORT_DATA_IO_PU_PD(90), PORT_DATA_IO_PU_PD(91),
482 _IO_UD(180), _IO_UD(181), _IO_UD(182), _IO_UD(183), _IO_UD(184), 428 PORT_DATA_IO_PU_PD(92), PORT_DATA_IO_PU_PD(93),
483 __O___(185), _IO_UD(186), _IO_UD(187), _IO_UD(188), _IO_UD(189), 429 PORT_DATA_IO_PU_PD(94), PORT_DATA_IO_PU_PD(95),
484 430 PORT_DATA_IO_PU(96), PORT_DATA_IO_PU_PD(97),
485 _IO_UD(190), 431 PORT_DATA_IO_PU_PD(98), PORT_DATA_O(99), /*99?*/
432
433 PORT_DATA_IO_PD(100), PORT_DATA_IO_PD(101),
434 PORT_DATA_IO_PD(102), PORT_DATA_IO_PD(103),
435 PORT_DATA_IO_PD(104), PORT_DATA_IO_PD(105),
436 PORT_DATA_IO_PU(106), PORT_DATA_IO_PU(107),
437 PORT_DATA_IO_PU(108), PORT_DATA_IO_PU(109),
438
439 PORT_DATA_IO_PU(110), PORT_DATA_IO_PU(111),
440 PORT_DATA_IO_PD(112), PORT_DATA_IO_PD(113),
441 PORT_DATA_IO_PU(114), PORT_DATA_IO_PU(115),
442 PORT_DATA_IO_PU(116), PORT_DATA_IO_PU(117),
443 PORT_DATA_IO_PU(118), PORT_DATA_IO_PU(119),
444
445 PORT_DATA_IO_PU(120), PORT_DATA_IO_PD(121),
446 PORT_DATA_IO_PD(122), PORT_DATA_IO_PD(123),
447 PORT_DATA_IO_PD(124), PORT_DATA_IO_PD(125),
448 PORT_DATA_IO_PD(126), PORT_DATA_IO_PD(127),
449 PORT_DATA_IO_PD(128), PORT_DATA_IO_PU_PD(129),
450
451 PORT_DATA_IO_PU_PD(130), PORT_DATA_IO_PU_PD(131),
452 PORT_DATA_IO_PU_PD(132), PORT_DATA_IO_PU_PD(133),
453 PORT_DATA_IO_PU_PD(134), PORT_DATA_IO_PU_PD(135),
454 PORT_DATA_IO_PD(136), PORT_DATA_IO_PD(137),
455 PORT_DATA_IO_PD(138), PORT_DATA_IO_PD(139),
456
457 PORT_DATA_IO_PD(140), PORT_DATA_IO_PD(141),
458 PORT_DATA_IO_PD(142), PORT_DATA_IO_PU_PD(143),
459 PORT_DATA_IO_PD(144), PORT_DATA_IO_PD(145),
460 PORT_DATA_IO_PD(146), PORT_DATA_IO_PD(147),
461 PORT_DATA_IO_PD(148), PORT_DATA_IO_PD(149),
462
463 PORT_DATA_IO_PD(150), PORT_DATA_IO_PD(151),
464 PORT_DATA_IO_PU_PD(152), PORT_DATA_I_PD(153),
465 PORT_DATA_IO_PU_PD(154), PORT_DATA_I_PD(155),
466 PORT_DATA_IO_PD(156), PORT_DATA_IO_PD(157),
467 PORT_DATA_I_PD(158), PORT_DATA_IO_PD(159),
468
469 PORT_DATA_O(160), PORT_DATA_IO_PD(161),
470 PORT_DATA_IO_PD(162), PORT_DATA_IO_PD(163),
471 PORT_DATA_I_PD(164), PORT_DATA_IO_PD(165),
472 PORT_DATA_I_PD(166), PORT_DATA_I_PD(167),
473 PORT_DATA_I_PD(168), PORT_DATA_I_PD(169),
474
475 PORT_DATA_I_PD(170), PORT_DATA_O(171),
476 PORT_DATA_IO_PU_PD(172), PORT_DATA_IO_PU_PD(173),
477 PORT_DATA_IO_PU_PD(174), PORT_DATA_IO_PU_PD(175),
478 PORT_DATA_IO_PU_PD(176), PORT_DATA_IO_PU_PD(177),
479 PORT_DATA_IO_PU_PD(178), PORT_DATA_O(179),
480
481 PORT_DATA_IO_PU_PD(180), PORT_DATA_IO_PU_PD(181),
482 PORT_DATA_IO_PU_PD(182), PORT_DATA_IO_PU_PD(183),
483 PORT_DATA_IO_PU_PD(184), PORT_DATA_O(185),
484 PORT_DATA_IO_PU_PD(186), PORT_DATA_IO_PU_PD(187),
485 PORT_DATA_IO_PU_PD(188), PORT_DATA_IO_PU_PD(189),
486
487 PORT_DATA_IO_PU_PD(190),
486 488
487 /* IRQ */ 489 /* IRQ */
488 PINMUX_DATA(IRQ0_6_MARK, PORT6_FN0, MSEL1CR_0_0), 490 PINMUX_DATA(IRQ0_6_MARK, PORT6_FN0, MSEL1CR_0_0),
@@ -926,10 +928,6 @@ static pinmux_enum_t pinmux_data[] = {
926 PINMUX_DATA(MFIv4_MARK, MSEL4CR_6_1), 928 PINMUX_DATA(MFIv4_MARK, MSEL4CR_6_1),
927}; 929};
928 930
929#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
930#define GPIO_PORT_ALL() _190(_GPIO_PORT, , unused)
931#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
932
933static struct pinmux_gpio pinmux_gpios[] = { 931static struct pinmux_gpio pinmux_gpios[] = {
934 932
935 /* PORT */ 933 /* PORT */
@@ -1201,22 +1199,6 @@ static struct pinmux_gpio pinmux_gpios[] = {
1201 GPIO_FN(SDENC_DV_CLKI), 1199 GPIO_FN(SDENC_DV_CLKI),
1202}; 1200};
1203 1201
1204/* helper for top 4 bits in PORTnCR */
1205#define PCRH(in, in_pd, in_pu, out) \
1206 0, (out), (in), 0, \
1207 0, 0, 0, 0, \
1208 0, 0, (in_pd), 0, \
1209 0, 0, (in_pu), 0
1210
1211#define PORTCR(nr, reg) \
1212 { PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
1213 PCRH(PORT##nr##_IN, PORT##nr##_IN_PD, \
1214 PORT##nr##_IN_PU, PORT##nr##_OUT), \
1215 PORT##nr##_FN0, PORT##nr##_FN1, PORT##nr##_FN2, \
1216 PORT##nr##_FN3, PORT##nr##_FN4, PORT##nr##_FN5, \
1217 PORT##nr##_FN6, PORT##nr##_FN7 } \
1218 }
1219
1220static struct pinmux_cfg_reg pinmux_config_regs[] = { 1202static struct pinmux_cfg_reg pinmux_config_regs[] = {
1221 PORTCR(0, 0xE6051000), /* PORT0CR */ 1203 PORTCR(0, 0xE6051000), /* PORT0CR */
1222 PORTCR(1, 0xE6051001), /* PORT1CR */ 1204 PORTCR(1, 0xE6051001), /* PORT1CR */
diff --git a/arch/arm/mach-shmobile/pfc-sh7377.c b/arch/arm/mach-shmobile/pfc-sh7377.c
index 613e6842ad05..2f10511946ad 100644
--- a/arch/arm/mach-shmobile/pfc-sh7377.c
+++ b/arch/arm/mach-shmobile/pfc-sh7377.c
@@ -22,84 +22,65 @@
22#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <mach/sh7377.h> 23#include <mach/sh7377.h>
24 24
25#define _1(fn, pfx, sfx) fn(pfx, sfx) 25#define CPU_ALL_PORT(fn, pfx, sfx) \
26 26 PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
27#define _10(fn, pfx, sfx) \ 27 PORT_10(fn, pfx##10, sfx), \
28 _1(fn, pfx##0, sfx), _1(fn, pfx##1, sfx), \ 28 PORT_1(fn, pfx##110, sfx), PORT_1(fn, pfx##111, sfx), \
29 _1(fn, pfx##2, sfx), _1(fn, pfx##3, sfx), \ 29 PORT_1(fn, pfx##112, sfx), PORT_1(fn, pfx##113, sfx), \
30 _1(fn, pfx##4, sfx), _1(fn, pfx##5, sfx), \ 30 PORT_1(fn, pfx##114, sfx), PORT_1(fn, pfx##115, sfx), \
31 _1(fn, pfx##6, sfx), _1(fn, pfx##7, sfx), \ 31 PORT_1(fn, pfx##116, sfx), PORT_1(fn, pfx##117, sfx), \
32 _1(fn, pfx##8, sfx), _1(fn, pfx##9, sfx) 32 PORT_1(fn, pfx##118, sfx), \
33 33 PORT_1(fn, pfx##128, sfx), PORT_1(fn, pfx##129, sfx), \
34#define _90(fn, pfx, sfx) \ 34 PORT_10(fn, pfx##13, sfx), PORT_10(fn, pfx##14, sfx), \
35 _10(fn, pfx##1, sfx), _10(fn, pfx##2, sfx), \ 35 PORT_10(fn, pfx##15, sfx), \
36 _10(fn, pfx##3, sfx), _10(fn, pfx##4, sfx), \ 36 PORT_1(fn, pfx##160, sfx), PORT_1(fn, pfx##161, sfx), \
37 _10(fn, pfx##5, sfx), _10(fn, pfx##6, sfx), \ 37 PORT_1(fn, pfx##162, sfx), PORT_1(fn, pfx##163, sfx), \
38 _10(fn, pfx##7, sfx), _10(fn, pfx##8, sfx), \ 38 PORT_1(fn, pfx##164, sfx), \
39 _10(fn, pfx##9, sfx) 39 PORT_1(fn, pfx##192, sfx), PORT_1(fn, pfx##193, sfx), \
40 40 PORT_1(fn, pfx##194, sfx), PORT_1(fn, pfx##195, sfx), \
41#define _265(fn, pfx, sfx) \ 41 PORT_1(fn, pfx##196, sfx), PORT_1(fn, pfx##197, sfx), \
42 _10(fn, pfx, sfx), _90(fn, pfx, sfx), \ 42 PORT_1(fn, pfx##198, sfx), PORT_1(fn, pfx##199, sfx), \
43 _10(fn, pfx##10, sfx), \ 43 PORT_10(fn, pfx##20, sfx), PORT_10(fn, pfx##21, sfx), \
44 _1(fn, pfx##110, sfx), _1(fn, pfx##111, sfx), \ 44 PORT_10(fn, pfx##22, sfx), PORT_10(fn, pfx##23, sfx), \
45 _1(fn, pfx##112, sfx), _1(fn, pfx##113, sfx), \ 45 PORT_10(fn, pfx##24, sfx), PORT_10(fn, pfx##25, sfx), \
46 _1(fn, pfx##114, sfx), _1(fn, pfx##115, sfx), \ 46 PORT_1(fn, pfx##260, sfx), PORT_1(fn, pfx##261, sfx), \
47 _1(fn, pfx##116, sfx), _1(fn, pfx##117, sfx), \ 47 PORT_1(fn, pfx##262, sfx), PORT_1(fn, pfx##263, sfx), \
48 _1(fn, pfx##118, sfx), \ 48 PORT_1(fn, pfx##264, sfx)
49 _1(fn, pfx##128, sfx), _1(fn, pfx##129, sfx), \
50 _10(fn, pfx##13, sfx), _10(fn, pfx##14, sfx), \
51 _10(fn, pfx##15, sfx), \
52 _1(fn, pfx##160, sfx), _1(fn, pfx##161, sfx), \
53 _1(fn, pfx##162, sfx), _1(fn, pfx##163, sfx), \
54 _1(fn, pfx##164, sfx), \
55 _1(fn, pfx##192, sfx), _1(fn, pfx##193, sfx), \
56 _1(fn, pfx##194, sfx), _1(fn, pfx##195, sfx), \
57 _1(fn, pfx##196, sfx), _1(fn, pfx##197, sfx), \
58 _1(fn, pfx##198, sfx), _1(fn, pfx##199, sfx), \
59 _10(fn, pfx##20, sfx), _10(fn, pfx##21, sfx), \
60 _10(fn, pfx##22, sfx), _10(fn, pfx##23, sfx), \
61 _10(fn, pfx##24, sfx), _10(fn, pfx##25, sfx), \
62 _1(fn, pfx##260, sfx), _1(fn, pfx##261, sfx), \
63 _1(fn, pfx##262, sfx), _1(fn, pfx##263, sfx), \
64 _1(fn, pfx##264, sfx)
65
66#define _PORT(pfx, sfx) pfx##_##sfx
67#define PORT_265(str) _265(_PORT, PORT, str)
68 49
69enum { 50enum {
70 PINMUX_RESERVED = 0, 51 PINMUX_RESERVED = 0,
71 52
72 PINMUX_DATA_BEGIN, 53 PINMUX_DATA_BEGIN,
73 PORT_265(DATA), /* PORT0_DATA -> PORT264_DATA */ 54 PORT_ALL(DATA), /* PORT0_DATA -> PORT264_DATA */
74 PINMUX_DATA_END, 55 PINMUX_DATA_END,
75 56
76 PINMUX_INPUT_BEGIN, 57 PINMUX_INPUT_BEGIN,
77 PORT_265(IN), /* PORT0_IN -> PORT264_IN */ 58 PORT_ALL(IN), /* PORT0_IN -> PORT264_IN */
78 PINMUX_INPUT_END, 59 PINMUX_INPUT_END,
79 60
80 PINMUX_INPUT_PULLUP_BEGIN, 61 PINMUX_INPUT_PULLUP_BEGIN,
81 PORT_265(IN_PU), /* PORT0_IN_PU -> PORT264_IN_PU */ 62 PORT_ALL(IN_PU), /* PORT0_IN_PU -> PORT264_IN_PU */
82 PINMUX_INPUT_PULLUP_END, 63 PINMUX_INPUT_PULLUP_END,
83 64
84 PINMUX_INPUT_PULLDOWN_BEGIN, 65 PINMUX_INPUT_PULLDOWN_BEGIN,
85 PORT_265(IN_PD), /* PORT0_IN_PD -> PORT264_IN_PD */ 66 PORT_ALL(IN_PD), /* PORT0_IN_PD -> PORT264_IN_PD */
86 PINMUX_INPUT_PULLDOWN_END, 67 PINMUX_INPUT_PULLDOWN_END,
87 68
88 PINMUX_OUTPUT_BEGIN, 69 PINMUX_OUTPUT_BEGIN,
89 PORT_265(OUT), /* PORT0_OUT -> PORT264_OUT */ 70 PORT_ALL(OUT), /* PORT0_OUT -> PORT264_OUT */
90 PINMUX_OUTPUT_END, 71 PINMUX_OUTPUT_END,
91 72
92 PINMUX_FUNCTION_BEGIN, 73 PINMUX_FUNCTION_BEGIN,
93 PORT_265(FN_IN), /* PORT0_FN_IN -> PORT264_FN_IN */ 74 PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT264_FN_IN */
94 PORT_265(FN_OUT), /* PORT0_FN_OUT -> PORT264_FN_OUT */ 75 PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT264_FN_OUT */
95 PORT_265(FN0), /* PORT0_FN0 -> PORT264_FN0 */ 76 PORT_ALL(FN0), /* PORT0_FN0 -> PORT264_FN0 */
96 PORT_265(FN1), /* PORT0_FN1 -> PORT264_FN1 */ 77 PORT_ALL(FN1), /* PORT0_FN1 -> PORT264_FN1 */
97 PORT_265(FN2), /* PORT0_FN2 -> PORT264_FN2 */ 78 PORT_ALL(FN2), /* PORT0_FN2 -> PORT264_FN2 */
98 PORT_265(FN3), /* PORT0_FN3 -> PORT264_FN3 */ 79 PORT_ALL(FN3), /* PORT0_FN3 -> PORT264_FN3 */
99 PORT_265(FN4), /* PORT0_FN4 -> PORT264_FN4 */ 80 PORT_ALL(FN4), /* PORT0_FN4 -> PORT264_FN4 */
100 PORT_265(FN5), /* PORT0_FN5 -> PORT264_FN5 */ 81 PORT_ALL(FN5), /* PORT0_FN5 -> PORT264_FN5 */
101 PORT_265(FN6), /* PORT0_FN6 -> PORT264_FN6 */ 82 PORT_ALL(FN6), /* PORT0_FN6 -> PORT264_FN6 */
102 PORT_265(FN7), /* PORT0_FN7 -> PORT264_FN7 */ 83 PORT_ALL(FN7), /* PORT0_FN7 -> PORT264_FN7 */
103 84
104 MSELBCR_MSEL17_1, MSELBCR_MSEL17_0, 85 MSELBCR_MSEL17_1, MSELBCR_MSEL17_0,
105 MSELBCR_MSEL16_1, MSELBCR_MSEL16_0, 86 MSELBCR_MSEL16_1, MSELBCR_MSEL16_0,
@@ -360,45 +341,6 @@ enum {
360 PINMUX_MARK_END, 341 PINMUX_MARK_END,
361}; 342};
362 343
363#define PORT_DATA_I(nr) \
364 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN)
365
366#define PORT_DATA_I_PD(nr) \
367 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
368 PORT##nr##_IN, PORT##nr##_IN_PD)
369
370#define PORT_DATA_I_PU(nr) \
371 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
372 PORT##nr##_IN, PORT##nr##_IN_PU)
373
374#define PORT_DATA_I_PU_PD(nr) \
375 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
376 PORT##nr##_IN, PORT##nr##_IN_PD, \
377 PORT##nr##_IN_PU)
378
379#define PORT_DATA_O(nr) \
380 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
381 PORT##nr##_OUT)
382
383#define PORT_DATA_IO(nr) \
384 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
385 PORT##nr##_OUT, PORT##nr##_IN)
386
387#define PORT_DATA_IO_PD(nr) \
388 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
389 PORT##nr##_OUT, PORT##nr##_IN, \
390 PORT##nr##_IN_PD)
391
392#define PORT_DATA_IO_PU(nr) \
393 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
394 PORT##nr##_OUT, PORT##nr##_IN, \
395 PORT##nr##_IN_PU)
396
397#define PORT_DATA_IO_PU_PD(nr) \
398 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
399 PORT##nr##_OUT, PORT##nr##_IN, \
400 PORT##nr##_IN_PD, PORT##nr##_IN_PU)
401
402static pinmux_enum_t pinmux_data[] = { 344static pinmux_enum_t pinmux_data[] = {
403 /* specify valid pin states for each pin in GPIO mode */ 345 /* specify valid pin states for each pin in GPIO mode */
404 /* 55-1 (GPIO) */ 346 /* 55-1 (GPIO) */
@@ -1078,13 +1020,9 @@ static pinmux_enum_t pinmux_data[] = {
1078 PINMUX_DATA(RESETOUTS_MARK, PORT264_FN1), 1020 PINMUX_DATA(RESETOUTS_MARK, PORT264_FN1),
1079}; 1021};
1080 1022
1081#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
1082#define GPIO_PORT_265() _265(_GPIO_PORT, , unused)
1083#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
1084
1085static struct pinmux_gpio pinmux_gpios[] = { 1023static struct pinmux_gpio pinmux_gpios[] = {
1086 /* 55-1 -> 55-5 (GPIO) */ 1024 /* 55-1 -> 55-5 (GPIO) */
1087 GPIO_PORT_265(), 1025 GPIO_PORT_ALL(),
1088 1026
1089 /* Special Pull-up / Pull-down Functions */ 1027 /* Special Pull-up / Pull-down Functions */
1090 GPIO_FN(PORT66_KEYIN0_PU), GPIO_FN(PORT67_KEYIN1_PU), 1028 GPIO_FN(PORT66_KEYIN0_PU), GPIO_FN(PORT67_KEYIN1_PU),
@@ -1362,23 +1300,6 @@ static struct pinmux_gpio pinmux_gpios[] = {
1362 GPIO_FN(RESETOUTS), 1300 GPIO_FN(RESETOUTS),
1363}; 1301};
1364 1302
1365/* helper for top 4 bits in PORTnCR */
1366#define PCRH(in, in_pd, in_pu, out) \
1367 0, (out), (in), 0, \
1368 0, 0, 0, 0, \
1369 0, 0, (in_pd), 0, \
1370 0, 0, (in_pu), 0
1371
1372#define PORTCR(nr, reg) \
1373 { PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
1374 PCRH(PORT##nr##_IN, PORT##nr##_IN_PD, \
1375 PORT##nr##_IN_PU, PORT##nr##_OUT), \
1376 PORT##nr##_FN0, PORT##nr##_FN1, \
1377 PORT##nr##_FN2, PORT##nr##_FN3, \
1378 PORT##nr##_FN4, PORT##nr##_FN5, \
1379 PORT##nr##_FN6, PORT##nr##_FN7 } \
1380 }
1381
1382static struct pinmux_cfg_reg pinmux_config_regs[] = { 1303static struct pinmux_cfg_reg pinmux_config_regs[] = {
1383 PORTCR(0, 0xe6050000), /* PORT0CR */ 1304 PORTCR(0, 0xe6050000), /* PORT0CR */
1384 PORTCR(1, 0xe6050001), /* PORT1CR */ 1305 PORTCR(1, 0xe6050001), /* PORT1CR */
diff --git a/arch/arm/mach-shmobile/pfc-sh73a0.c b/arch/arm/mach-shmobile/pfc-sh73a0.c
index 5abe02fbd6b9..e05634ce2e0d 100644
--- a/arch/arm/mach-shmobile/pfc-sh73a0.c
+++ b/arch/arm/mach-shmobile/pfc-sh73a0.c
@@ -24,83 +24,71 @@
24#include <mach/sh73a0.h> 24#include <mach/sh73a0.h>
25#include <mach/irqs.h> 25#include <mach/irqs.h>
26 26
27#define _1(fn, pfx, sfx) fn(pfx, sfx) 27#define CPU_ALL_PORT(fn, pfx, sfx) \
28 28 PORT_10(fn, pfx, sfx), PORT_10(fn, pfx##1, sfx), \
29#define _10(fn, pfx, sfx) \ 29 PORT_10(fn, pfx##2, sfx), PORT_10(fn, pfx##3, sfx), \
30 _1(fn, pfx##0, sfx), _1(fn, pfx##1, sfx), \ 30 PORT_10(fn, pfx##4, sfx), PORT_10(fn, pfx##5, sfx), \
31 _1(fn, pfx##2, sfx), _1(fn, pfx##3, sfx), \ 31 PORT_10(fn, pfx##6, sfx), PORT_10(fn, pfx##7, sfx), \
32 _1(fn, pfx##4, sfx), _1(fn, pfx##5, sfx), \ 32 PORT_10(fn, pfx##8, sfx), PORT_10(fn, pfx##9, sfx), \
33 _1(fn, pfx##6, sfx), _1(fn, pfx##7, sfx), \ 33 PORT_10(fn, pfx##10, sfx), \
34 _1(fn, pfx##8, sfx), _1(fn, pfx##9, sfx) 34 PORT_1(fn, pfx##110, sfx), PORT_1(fn, pfx##111, sfx), \
35 35 PORT_1(fn, pfx##112, sfx), PORT_1(fn, pfx##113, sfx), \
36#define _310(fn, pfx, sfx) \ 36 PORT_1(fn, pfx##114, sfx), PORT_1(fn, pfx##115, sfx), \
37 _10(fn, pfx, sfx), _10(fn, pfx##1, sfx), \ 37 PORT_1(fn, pfx##116, sfx), PORT_1(fn, pfx##117, sfx), \
38 _10(fn, pfx##2, sfx), _10(fn, pfx##3, sfx), \ 38 PORT_1(fn, pfx##118, sfx), \
39 _10(fn, pfx##4, sfx), _10(fn, pfx##5, sfx), \ 39 PORT_1(fn, pfx##128, sfx), PORT_1(fn, pfx##129, sfx), \
40 _10(fn, pfx##6, sfx), _10(fn, pfx##7, sfx), \ 40 PORT_10(fn, pfx##13, sfx), PORT_10(fn, pfx##14, sfx), \
41 _10(fn, pfx##8, sfx), _10(fn, pfx##9, sfx), \ 41 PORT_10(fn, pfx##15, sfx), \
42 _10(fn, pfx##10, sfx), \ 42 PORT_1(fn, pfx##160, sfx), PORT_1(fn, pfx##161, sfx), \
43 _1(fn, pfx##110, sfx), _1(fn, pfx##111, sfx), \ 43 PORT_1(fn, pfx##162, sfx), PORT_1(fn, pfx##163, sfx), \
44 _1(fn, pfx##112, sfx), _1(fn, pfx##113, sfx), \ 44 PORT_1(fn, pfx##164, sfx), \
45 _1(fn, pfx##114, sfx), _1(fn, pfx##115, sfx), \ 45 PORT_1(fn, pfx##192, sfx), PORT_1(fn, pfx##193, sfx), \
46 _1(fn, pfx##116, sfx), _1(fn, pfx##117, sfx), \ 46 PORT_1(fn, pfx##194, sfx), PORT_1(fn, pfx##195, sfx), \
47 _1(fn, pfx##118, sfx), \ 47 PORT_1(fn, pfx##196, sfx), PORT_1(fn, pfx##197, sfx), \
48 _1(fn, pfx##128, sfx), _1(fn, pfx##129, sfx), \ 48 PORT_1(fn, pfx##198, sfx), PORT_1(fn, pfx##199, sfx), \
49 _10(fn, pfx##13, sfx), _10(fn, pfx##14, sfx), \ 49 PORT_10(fn, pfx##20, sfx), PORT_10(fn, pfx##21, sfx), \
50 _10(fn, pfx##15, sfx), \ 50 PORT_10(fn, pfx##22, sfx), PORT_10(fn, pfx##23, sfx), \
51 _1(fn, pfx##160, sfx), _1(fn, pfx##161, sfx), \ 51 PORT_10(fn, pfx##24, sfx), PORT_10(fn, pfx##25, sfx), \
52 _1(fn, pfx##162, sfx), _1(fn, pfx##163, sfx), \ 52 PORT_10(fn, pfx##26, sfx), PORT_10(fn, pfx##27, sfx), \
53 _1(fn, pfx##164, sfx), \ 53 PORT_1(fn, pfx##280, sfx), PORT_1(fn, pfx##281, sfx), \
54 _1(fn, pfx##192, sfx), _1(fn, pfx##193, sfx), \ 54 PORT_1(fn, pfx##282, sfx), \
55 _1(fn, pfx##194, sfx), _1(fn, pfx##195, sfx), \ 55 PORT_1(fn, pfx##288, sfx), PORT_1(fn, pfx##289, sfx), \
56 _1(fn, pfx##196, sfx), _1(fn, pfx##197, sfx), \ 56 PORT_10(fn, pfx##29, sfx), PORT_10(fn, pfx##30, sfx)
57 _1(fn, pfx##198, sfx), _1(fn, pfx##199, sfx), \
58 _10(fn, pfx##20, sfx), _10(fn, pfx##21, sfx), \
59 _10(fn, pfx##22, sfx), _10(fn, pfx##23, sfx), \
60 _10(fn, pfx##24, sfx), _10(fn, pfx##25, sfx), \
61 _10(fn, pfx##26, sfx), _10(fn, pfx##27, sfx), \
62 _1(fn, pfx##280, sfx), _1(fn, pfx##281, sfx), \
63 _1(fn, pfx##282, sfx), \
64 _1(fn, pfx##288, sfx), _1(fn, pfx##289, sfx), \
65 _10(fn, pfx##29, sfx), _10(fn, pfx##30, sfx)
66
67#define _PORT(pfx, sfx) pfx##_##sfx
68#define PORT_310(str) _310(_PORT, PORT, str)
69 57
70enum { 58enum {
71 PINMUX_RESERVED = 0, 59 PINMUX_RESERVED = 0,
72 60
73 PINMUX_DATA_BEGIN, 61 PINMUX_DATA_BEGIN,
74 PORT_310(DATA), /* PORT0_DATA -> PORT309_DATA */ 62 PORT_ALL(DATA), /* PORT0_DATA -> PORT309_DATA */
75 PINMUX_DATA_END, 63 PINMUX_DATA_END,
76 64
77 PINMUX_INPUT_BEGIN, 65 PINMUX_INPUT_BEGIN,
78 PORT_310(IN), /* PORT0_IN -> PORT309_IN */ 66 PORT_ALL(IN), /* PORT0_IN -> PORT309_IN */
79 PINMUX_INPUT_END, 67 PINMUX_INPUT_END,
80 68
81 PINMUX_INPUT_PULLUP_BEGIN, 69 PINMUX_INPUT_PULLUP_BEGIN,
82 PORT_310(IN_PU), /* PORT0_IN_PU -> PORT309_IN_PU */ 70 PORT_ALL(IN_PU), /* PORT0_IN_PU -> PORT309_IN_PU */
83 PINMUX_INPUT_PULLUP_END, 71 PINMUX_INPUT_PULLUP_END,
84 72
85 PINMUX_INPUT_PULLDOWN_BEGIN, 73 PINMUX_INPUT_PULLDOWN_BEGIN,
86 PORT_310(IN_PD), /* PORT0_IN_PD -> PORT309_IN_PD */ 74 PORT_ALL(IN_PD), /* PORT0_IN_PD -> PORT309_IN_PD */
87 PINMUX_INPUT_PULLDOWN_END, 75 PINMUX_INPUT_PULLDOWN_END,
88 76
89 PINMUX_OUTPUT_BEGIN, 77 PINMUX_OUTPUT_BEGIN,
90 PORT_310(OUT), /* PORT0_OUT -> PORT309_OUT */ 78 PORT_ALL(OUT), /* PORT0_OUT -> PORT309_OUT */
91 PINMUX_OUTPUT_END, 79 PINMUX_OUTPUT_END,
92 80
93 PINMUX_FUNCTION_BEGIN, 81 PINMUX_FUNCTION_BEGIN,
94 PORT_310(FN_IN), /* PORT0_FN_IN -> PORT309_FN_IN */ 82 PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT309_FN_IN */
95 PORT_310(FN_OUT), /* PORT0_FN_OUT -> PORT309_FN_OUT */ 83 PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT309_FN_OUT */
96 PORT_310(FN0), /* PORT0_FN0 -> PORT309_FN0 */ 84 PORT_ALL(FN0), /* PORT0_FN0 -> PORT309_FN0 */
97 PORT_310(FN1), /* PORT0_FN1 -> PORT309_FN1 */ 85 PORT_ALL(FN1), /* PORT0_FN1 -> PORT309_FN1 */
98 PORT_310(FN2), /* PORT0_FN2 -> PORT309_FN2 */ 86 PORT_ALL(FN2), /* PORT0_FN2 -> PORT309_FN2 */
99 PORT_310(FN3), /* PORT0_FN3 -> PORT309_FN3 */ 87 PORT_ALL(FN3), /* PORT0_FN3 -> PORT309_FN3 */
100 PORT_310(FN4), /* PORT0_FN4 -> PORT309_FN4 */ 88 PORT_ALL(FN4), /* PORT0_FN4 -> PORT309_FN4 */
101 PORT_310(FN5), /* PORT0_FN5 -> PORT309_FN5 */ 89 PORT_ALL(FN5), /* PORT0_FN5 -> PORT309_FN5 */
102 PORT_310(FN6), /* PORT0_FN6 -> PORT309_FN6 */ 90 PORT_ALL(FN6), /* PORT0_FN6 -> PORT309_FN6 */
103 PORT_310(FN7), /* PORT0_FN7 -> PORT309_FN7 */ 91 PORT_ALL(FN7), /* PORT0_FN7 -> PORT309_FN7 */
104 92
105 MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1, 93 MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1,
106 MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1, 94 MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1,
@@ -508,6 +496,14 @@ enum {
508 SDHICMD2_PU_MARK, 496 SDHICMD2_PU_MARK,
509 MMCCMD0_PU_MARK, 497 MMCCMD0_PU_MARK,
510 MMCCMD1_PU_MARK, 498 MMCCMD1_PU_MARK,
499 MMCD0_0_PU_MARK,
500 MMCD0_1_PU_MARK,
501 MMCD0_2_PU_MARK,
502 MMCD0_3_PU_MARK,
503 MMCD0_4_PU_MARK,
504 MMCD0_5_PU_MARK,
505 MMCD0_6_PU_MARK,
506 MMCD0_7_PU_MARK,
511 FSIBISLD_PU_MARK, 507 FSIBISLD_PU_MARK,
512 FSIACK_PU_MARK, 508 FSIACK_PU_MARK,
513 FSIAILR_PU_MARK, 509 FSIAILR_PU_MARK,
@@ -517,45 +513,6 @@ enum {
517 PINMUX_MARK_END, 513 PINMUX_MARK_END,
518}; 514};
519 515
520#define PORT_DATA_I(nr) \
521 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN)
522
523#define PORT_DATA_I_PD(nr) \
524 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
525 PORT##nr##_IN, PORT##nr##_IN_PD)
526
527#define PORT_DATA_I_PU(nr) \
528 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
529 PORT##nr##_IN, PORT##nr##_IN_PU)
530
531#define PORT_DATA_I_PU_PD(nr) \
532 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
533 PORT##nr##_IN, PORT##nr##_IN_PD, \
534 PORT##nr##_IN_PU)
535
536#define PORT_DATA_O(nr) \
537 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
538 PORT##nr##_OUT)
539
540#define PORT_DATA_IO(nr) \
541 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
542 PORT##nr##_OUT, PORT##nr##_IN)
543
544#define PORT_DATA_IO_PD(nr) \
545 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
546 PORT##nr##_OUT, PORT##nr##_IN, \
547 PORT##nr##_IN_PD)
548
549#define PORT_DATA_IO_PU(nr) \
550 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
551 PORT##nr##_OUT, PORT##nr##_IN, \
552 PORT##nr##_IN_PU)
553
554#define PORT_DATA_IO_PU_PD(nr) \
555 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
556 PORT##nr##_OUT, PORT##nr##_IN, \
557 PORT##nr##_IN_PD, PORT##nr##_IN_PU)
558
559static pinmux_enum_t pinmux_data[] = { 516static pinmux_enum_t pinmux_data[] = {
560 /* specify valid pin states for each pin in GPIO mode */ 517 /* specify valid pin states for each pin in GPIO mode */
561 518
@@ -1561,6 +1518,24 @@ static pinmux_enum_t pinmux_data[] = {
1561 MSEL4CR_MSEL15_0), 1518 MSEL4CR_MSEL15_0),
1562 PINMUX_DATA(MMCCMD1_PU_MARK, PORT297_FN2, PORT297_IN_PU, 1519 PINMUX_DATA(MMCCMD1_PU_MARK, PORT297_FN2, PORT297_IN_PU,
1563 MSEL4CR_MSEL15_1), 1520 MSEL4CR_MSEL15_1),
1521
1522 PINMUX_DATA(MMCD0_0_PU_MARK,
1523 PORT271_FN1, PORT271_IN_PU, MSEL4CR_MSEL15_0),
1524 PINMUX_DATA(MMCD0_1_PU_MARK,
1525 PORT272_FN1, PORT272_IN_PU, MSEL4CR_MSEL15_0),
1526 PINMUX_DATA(MMCD0_2_PU_MARK,
1527 PORT273_FN1, PORT273_IN_PU, MSEL4CR_MSEL15_0),
1528 PINMUX_DATA(MMCD0_3_PU_MARK,
1529 PORT274_FN1, PORT274_IN_PU, MSEL4CR_MSEL15_0),
1530 PINMUX_DATA(MMCD0_4_PU_MARK,
1531 PORT275_FN1, PORT275_IN_PU, MSEL4CR_MSEL15_0),
1532 PINMUX_DATA(MMCD0_5_PU_MARK,
1533 PORT276_FN1, PORT276_IN_PU, MSEL4CR_MSEL15_0),
1534 PINMUX_DATA(MMCD0_6_PU_MARK,
1535 PORT277_FN1, PORT277_IN_PU, MSEL4CR_MSEL15_0),
1536 PINMUX_DATA(MMCD0_7_PU_MARK,
1537 PORT278_FN1, PORT278_IN_PU, MSEL4CR_MSEL15_0),
1538
1564 PINMUX_DATA(FSIBISLD_PU_MARK, PORT39_FN1, PORT39_IN_PU), 1539 PINMUX_DATA(FSIBISLD_PU_MARK, PORT39_FN1, PORT39_IN_PU),
1565 PINMUX_DATA(FSIACK_PU_MARK, PORT49_FN1, PORT49_IN_PU), 1540 PINMUX_DATA(FSIACK_PU_MARK, PORT49_FN1, PORT49_IN_PU),
1566 PINMUX_DATA(FSIAILR_PU_MARK, PORT50_FN5, PORT50_IN_PU), 1541 PINMUX_DATA(FSIAILR_PU_MARK, PORT50_FN5, PORT50_IN_PU),
@@ -1568,12 +1543,8 @@ static pinmux_enum_t pinmux_data[] = {
1568 PINMUX_DATA(FSIAISLD_PU_MARK, PORT55_FN1, PORT55_IN_PU), 1543 PINMUX_DATA(FSIAISLD_PU_MARK, PORT55_FN1, PORT55_IN_PU),
1569}; 1544};
1570 1545
1571#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
1572#define GPIO_PORT_310() _310(_GPIO_PORT, , unused)
1573#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
1574
1575static struct pinmux_gpio pinmux_gpios[] = { 1546static struct pinmux_gpio pinmux_gpios[] = {
1576 GPIO_PORT_310(), 1547 GPIO_PORT_ALL(),
1577 1548
1578 /* Table 25-1 (Functions 0-7) */ 1549 /* Table 25-1 (Functions 0-7) */
1579 GPIO_FN(VBUS_0), 1550 GPIO_FN(VBUS_0),
@@ -2236,24 +2207,20 @@ static struct pinmux_gpio pinmux_gpios[] = {
2236 GPIO_FN(SDHICMD2_PU), 2207 GPIO_FN(SDHICMD2_PU),
2237 GPIO_FN(MMCCMD0_PU), 2208 GPIO_FN(MMCCMD0_PU),
2238 GPIO_FN(MMCCMD1_PU), 2209 GPIO_FN(MMCCMD1_PU),
2210 GPIO_FN(MMCD0_0_PU),
2211 GPIO_FN(MMCD0_1_PU),
2212 GPIO_FN(MMCD0_2_PU),
2213 GPIO_FN(MMCD0_3_PU),
2214 GPIO_FN(MMCD0_4_PU),
2215 GPIO_FN(MMCD0_5_PU),
2216 GPIO_FN(MMCD0_6_PU),
2217 GPIO_FN(MMCD0_7_PU),
2239 GPIO_FN(FSIACK_PU), 2218 GPIO_FN(FSIACK_PU),
2240 GPIO_FN(FSIAILR_PU), 2219 GPIO_FN(FSIAILR_PU),
2241 GPIO_FN(FSIAIBT_PU), 2220 GPIO_FN(FSIAIBT_PU),
2242 GPIO_FN(FSIAISLD_PU), 2221 GPIO_FN(FSIAISLD_PU),
2243}; 2222};
2244 2223
2245#define PORTCR(nr, reg) \
2246 { PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
2247 0, \
2248 /*0001*/ PORT##nr##_OUT , \
2249 /*0010*/ PORT##nr##_IN , 0, 0, 0, 0, 0, 0, 0, \
2250 /*1010*/ PORT##nr##_IN_PD, 0, 0, 0, \
2251 /*1110*/ PORT##nr##_IN_PU, 0, \
2252 PORT##nr##_FN0, PORT##nr##_FN1, PORT##nr##_FN2, \
2253 PORT##nr##_FN3, PORT##nr##_FN4, PORT##nr##_FN5, \
2254 PORT##nr##_FN6, PORT##nr##_FN7, 0, 0, 0, 0, 0, 0, 0, 0 } \
2255 }
2256
2257static struct pinmux_cfg_reg pinmux_config_regs[] = { 2224static struct pinmux_cfg_reg pinmux_config_regs[] = {
2258 PORTCR(0, 0xe6050000), /* PORT0CR */ 2225 PORTCR(0, 0xe6050000), /* PORT0CR */
2259 PORTCR(1, 0xe6050001), /* PORT1CR */ 2226 PORTCR(1, 0xe6050001), /* PORT1CR */
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 79612737c5b2..0a5b22942fd3 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -402,22 +402,18 @@ static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
402 402
403#ifdef CONFIG_CPU_IDLE 403#ifdef CONFIG_CPU_IDLE
404 404
405static void sh7372_cpuidle_setup(struct cpuidle_device *dev) 405static void sh7372_cpuidle_setup(struct cpuidle_driver *drv)
406{ 406{
407 struct cpuidle_state *state; 407 struct cpuidle_state *state = &drv->states[drv->state_count];
408 int i = dev->state_count;
409 408
410 state = &dev->states[i];
411 snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); 409 snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
412 strncpy(state->desc, "Core Standby Mode", CPUIDLE_DESC_LEN); 410 strncpy(state->desc, "Core Standby Mode", CPUIDLE_DESC_LEN);
413 state->exit_latency = 10; 411 state->exit_latency = 10;
414 state->target_residency = 20 + 10; 412 state->target_residency = 20 + 10;
415 state->power_usage = 1; /* perhaps not */ 413 state->flags = CPUIDLE_FLAG_TIME_VALID;
416 state->flags = 0; 414 shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_core_standby;
417 state->flags |= CPUIDLE_FLAG_TIME_VALID;
418 shmobile_cpuidle_modes[i] = sh7372_enter_core_standby;
419 415
420 dev->state_count = i + 1; 416 drv->state_count++;
421} 417}
422 418
423static void sh7372_cpuidle_init(void) 419static void sh7372_cpuidle_init(void)
diff --git a/arch/arm/mach-tegra/board-dt.c b/arch/arm/mach-tegra/board-dt.c
index d368f8dafcfd..74743ad3d2d3 100644
--- a/arch/arm/mach-tegra/board-dt.c
+++ b/arch/arm/mach-tegra/board-dt.c
@@ -101,6 +101,13 @@ static void __init tegra_dt_init(void)
101 101
102 tegra_clk_init_from_table(tegra_dt_clk_init_table); 102 tegra_clk_init_from_table(tegra_dt_clk_init_table);
103 103
104 /*
105 * Finished with the static registrations now; fill in the missing
106 * devices
107 */
108 of_platform_populate(NULL, tegra_dt_match_table,
109 tegra20_auxdata_lookup, NULL);
110
104 for (i = 0; i < ARRAY_SIZE(pinmux_configs); i++) { 111 for (i = 0; i < ARRAY_SIZE(pinmux_configs); i++) {
105 if (of_machine_is_compatible(pinmux_configs[i].machine)) { 112 if (of_machine_is_compatible(pinmux_configs[i].machine)) {
106 pinmux_configs[i].init(); 113 pinmux_configs[i].init();
@@ -110,12 +117,6 @@ static void __init tegra_dt_init(void)
110 117
111 WARN(i == ARRAY_SIZE(pinmux_configs), 118 WARN(i == ARRAY_SIZE(pinmux_configs),
112 "Unknown platform! Pinmuxing not initialized\n"); 119 "Unknown platform! Pinmuxing not initialized\n");
113
114 /*
115 * Finished with the static registrations now; fill in the missing
116 * devices
117 */
118 of_platform_populate(NULL, tegra_dt_match_table, tegra20_auxdata_lookup, NULL);
119} 120}
120 121
121static const char * tegra_dt_board_compat[] = { 122static const char * tegra_dt_board_compat[] = {
diff --git a/arch/arm/mach-tegra/board-harmony-pinmux.c b/arch/arm/mach-tegra/board-harmony-pinmux.c
index e99b45618cd0..7a4a26d5174c 100644
--- a/arch/arm/mach-tegra/board-harmony-pinmux.c
+++ b/arch/arm/mach-tegra/board-harmony-pinmux.c
@@ -16,6 +16,8 @@
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/of.h>
20
19#include <mach/pinmux.h> 21#include <mach/pinmux.h>
20 22
21#include "gpio-names.h" 23#include "gpio-names.h"
@@ -161,7 +163,9 @@ static struct tegra_gpio_table gpio_table[] = {
161 163
162void harmony_pinmux_init(void) 164void harmony_pinmux_init(void)
163{ 165{
164 platform_add_devices(pinmux_devices, ARRAY_SIZE(pinmux_devices)); 166 if (!of_machine_is_compatible("nvidia,tegra20"))
167 platform_add_devices(pinmux_devices,
168 ARRAY_SIZE(pinmux_devices));
165 169
166 tegra_pinmux_config_table(harmony_pinmux, ARRAY_SIZE(harmony_pinmux)); 170 tegra_pinmux_config_table(harmony_pinmux, ARRAY_SIZE(harmony_pinmux));
167 171
diff --git a/arch/arm/mach-tegra/board-paz00-pinmux.c b/arch/arm/mach-tegra/board-paz00-pinmux.c
index fb20894862b0..be30e215f4b7 100644
--- a/arch/arm/mach-tegra/board-paz00-pinmux.c
+++ b/arch/arm/mach-tegra/board-paz00-pinmux.c
@@ -16,6 +16,8 @@
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/of.h>
20
19#include <mach/pinmux.h> 21#include <mach/pinmux.h>
20 22
21#include "gpio-names.h" 23#include "gpio-names.h"
@@ -158,7 +160,9 @@ static struct tegra_gpio_table gpio_table[] = {
158 160
159void paz00_pinmux_init(void) 161void paz00_pinmux_init(void)
160{ 162{
161 platform_add_devices(pinmux_devices, ARRAY_SIZE(pinmux_devices)); 163 if (!of_machine_is_compatible("nvidia,tegra20"))
164 platform_add_devices(pinmux_devices,
165 ARRAY_SIZE(pinmux_devices));
162 166
163 tegra_pinmux_config_table(paz00_pinmux, ARRAY_SIZE(paz00_pinmux)); 167 tegra_pinmux_config_table(paz00_pinmux, ARRAY_SIZE(paz00_pinmux));
164 168
diff --git a/arch/arm/mach-tegra/board-seaboard-pinmux.c b/arch/arm/mach-tegra/board-seaboard-pinmux.c
index fbce31daa3c9..b1c2972f62fe 100644
--- a/arch/arm/mach-tegra/board-seaboard-pinmux.c
+++ b/arch/arm/mach-tegra/board-seaboard-pinmux.c
@@ -16,6 +16,7 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/of.h>
19 20
20#include <mach/pinmux.h> 21#include <mach/pinmux.h>
21#include <mach/pinmux-t2.h> 22#include <mach/pinmux-t2.h>
@@ -191,6 +192,7 @@ static struct tegra_gpio_table common_gpio_table[] = {
191 { .gpio = TEGRA_GPIO_SD2_POWER, .enable = true }, 192 { .gpio = TEGRA_GPIO_SD2_POWER, .enable = true },
192 { .gpio = TEGRA_GPIO_LIDSWITCH, .enable = true }, 193 { .gpio = TEGRA_GPIO_LIDSWITCH, .enable = true },
193 { .gpio = TEGRA_GPIO_POWERKEY, .enable = true }, 194 { .gpio = TEGRA_GPIO_POWERKEY, .enable = true },
195 { .gpio = TEGRA_GPIO_HP_DET, .enable = true },
194 { .gpio = TEGRA_GPIO_ISL29018_IRQ, .enable = true }, 196 { .gpio = TEGRA_GPIO_ISL29018_IRQ, .enable = true },
195 { .gpio = TEGRA_GPIO_CDC_IRQ, .enable = true }, 197 { .gpio = TEGRA_GPIO_CDC_IRQ, .enable = true },
196 { .gpio = TEGRA_GPIO_USB1, .enable = true }, 198 { .gpio = TEGRA_GPIO_USB1, .enable = true },
@@ -218,7 +220,9 @@ static void __init update_pinmux(struct tegra_pingroup_config *newtbl, int size)
218 220
219void __init seaboard_common_pinmux_init(void) 221void __init seaboard_common_pinmux_init(void)
220{ 222{
221 platform_add_devices(pinmux_devices, ARRAY_SIZE(pinmux_devices)); 223 if (!of_machine_is_compatible("nvidia,tegra20"))
224 platform_add_devices(pinmux_devices,
225 ARRAY_SIZE(pinmux_devices));
222 226
223 tegra_pinmux_config_table(seaboard_pinmux, ARRAY_SIZE(seaboard_pinmux)); 227 tegra_pinmux_config_table(seaboard_pinmux, ARRAY_SIZE(seaboard_pinmux));
224 228
diff --git a/arch/arm/mach-tegra/board-trimslice-pinmux.c b/arch/arm/mach-tegra/board-trimslice-pinmux.c
index 4969dd28a04c..7ab719d46da0 100644
--- a/arch/arm/mach-tegra/board-trimslice-pinmux.c
+++ b/arch/arm/mach-tegra/board-trimslice-pinmux.c
@@ -16,6 +16,7 @@
16#include <linux/gpio.h> 16#include <linux/gpio.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/of.h>
19 20
20#include <mach/pinmux.h> 21#include <mach/pinmux.h>
21 22
@@ -157,7 +158,9 @@ static struct tegra_gpio_table gpio_table[] = {
157 158
158void __init trimslice_pinmux_init(void) 159void __init trimslice_pinmux_init(void)
159{ 160{
160 platform_add_devices(pinmux_devices, ARRAY_SIZE(pinmux_devices)); 161 if (!of_machine_is_compatible("nvidia,tegra20"))
162 platform_add_devices(pinmux_devices,
163 ARRAY_SIZE(pinmux_devices));
161 tegra_pinmux_config_table(trimslice_pinmux, ARRAY_SIZE(trimslice_pinmux)); 164 tegra_pinmux_config_table(trimslice_pinmux, ARRAY_SIZE(trimslice_pinmux));
162 tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table)); 165 tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table));
163} 166}
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
index a08a95107a63..b3a1f2b3ada3 100644
--- a/arch/arm/plat-mxc/Kconfig
+++ b/arch/arm/plat-mxc/Kconfig
@@ -10,7 +10,7 @@ choice
10 10
11config ARCH_IMX_V4_V5 11config ARCH_IMX_V4_V5
12 bool "i.MX1, i.MX21, i.MX25, i.MX27" 12 bool "i.MX1, i.MX21, i.MX25, i.MX27"
13 select AUTO_ZRELADDR 13 select AUTO_ZRELADDR if !ZBOOT_ROM
14 select ARM_PATCH_PHYS_VIRT 14 select ARM_PATCH_PHYS_VIRT
15 help 15 help
16 This enables support for systems based on the Freescale i.MX ARMv4 16 This enables support for systems based on the Freescale i.MX ARMv4
@@ -26,7 +26,7 @@ config ARCH_IMX_V6_V7
26 26
27config ARCH_MX5 27config ARCH_MX5
28 bool "i.MX50, i.MX51, i.MX53" 28 bool "i.MX50, i.MX51, i.MX53"
29 select AUTO_ZRELADDR 29 select AUTO_ZRELADDR if !ZBOOT_ROM
30 select ARM_PATCH_PHYS_VIRT 30 select ARM_PATCH_PHYS_VIRT
31 help 31 help
32 This enables support for machines using Freescale's i.MX50 and i.MX53 32 This enables support for machines using Freescale's i.MX50 and i.MX53
diff --git a/arch/arm/plat-mxc/avic.c b/arch/arm/plat-mxc/avic.c
index 8875fb415f68..55f15699a383 100644
--- a/arch/arm/plat-mxc/avic.c
+++ b/arch/arm/plat-mxc/avic.c
@@ -22,6 +22,7 @@
22#include <linux/io.h> 22#include <linux/io.h>
23#include <mach/common.h> 23#include <mach/common.h>
24#include <asm/mach/irq.h> 24#include <asm/mach/irq.h>
25#include <asm/exception.h>
25#include <mach/hardware.h> 26#include <mach/hardware.h>
26 27
27#include "irq-common.h" 28#include "irq-common.h"
diff --git a/arch/arm/plat-mxc/gic.c b/arch/arm/plat-mxc/gic.c
index b3b8eed263b8..12f8f8109010 100644
--- a/arch/arm/plat-mxc/gic.c
+++ b/arch/arm/plat-mxc/gic.c
@@ -28,21 +28,14 @@ asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
28 if (irqnr == 1023) 28 if (irqnr == 1023)
29 break; 29 break;
30 30
31 if (irqnr > 29 && irqnr < 1021) 31 if (irqnr > 15 && irqnr < 1021)
32 handle_IRQ(irqnr, regs); 32 handle_IRQ(irqnr, regs);
33#ifdef CONFIG_SMP 33#ifdef CONFIG_SMP
34 else if (irqnr < 16) { 34 else {
35 writel_relaxed(irqstat, gic_cpu_base_addr + 35 writel_relaxed(irqstat, gic_cpu_base_addr +
36 GIC_CPU_EOI); 36 GIC_CPU_EOI);
37 handle_IPI(irqnr, regs); 37 handle_IPI(irqnr, regs);
38 } 38 }
39#endif 39#endif
40#ifdef CONFIG_LOCAL_TIMERS
41 else if (irqnr == 29) {
42 writel_relaxed(irqstat, gic_cpu_base_addr +
43 GIC_CPU_EOI);
44 handle_local_timer(regs);
45 }
46#endif
47 } while (1); 40 } while (1);
48} 41}
diff --git a/arch/arm/plat-mxc/include/mach/entry-macro.S b/arch/arm/plat-mxc/include/mach/entry-macro.S
index 9fe0dfcf4e7e..ca5cf26a04b1 100644
--- a/arch/arm/plat-mxc/include/mach/entry-macro.S
+++ b/arch/arm/plat-mxc/include/mach/entry-macro.S
@@ -25,6 +25,3 @@
25 25
26 .macro test_for_ipi, irqnr, irqstat, base, tmp 26 .macro test_for_ipi, irqnr, irqstat, base, tmp
27 .endm 27 .endm
28
29 .macro test_for_ltirq, irqnr, irqstat, base, tmp
30 .endm
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index e993a184189a..a3c164c7ba82 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -17,6 +17,7 @@
17#include <linux/io.h> 17#include <linux/io.h>
18 18
19#include <asm/mach/irq.h> 19#include <asm/mach/irq.h>
20#include <asm/exception.h>
20 21
21#include <mach/hardware.h> 22#include <mach/hardware.h>
22#include <mach/common.h> 23#include <mach/common.h>
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 2def4e1990ed..af3b92be8459 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -35,6 +35,7 @@
35 * 675 Mass Ave, Cambridge, MA 02139, USA. 35 * 675 Mass Ave, Cambridge, MA 02139, USA.
36 */ 36 */
37 37
38#include <linux/module.h>
38#include <linux/io.h> 39#include <linux/io.h>
39#include <linux/slab.h> 40#include <linux/slab.h>
40#include <linux/err.h> 41#include <linux/err.h>
diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c
index 679cbd49c019..db071bc71c4d 100644
--- a/arch/arm/plat-omap/i2c.c
+++ b/arch/arm/plat-omap/i2c.c
@@ -184,7 +184,7 @@ static inline int omap2_i2c_add_bus(int bus_id)
184 NULL, 0, 0); 184 NULL, 0, 0);
185 WARN(IS_ERR(pdev), "Could not build omap_device for %s\n", name); 185 WARN(IS_ERR(pdev), "Could not build omap_device for %s\n", name);
186 186
187 return PTR_ERR(pdev); 187 return PTR_RET(pdev);
188} 188}
189#else 189#else
190static inline int omap2_i2c_add_bus(int bus_id) 190static inline int omap2_i2c_add_bus(int bus_id)
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 2f9026942229..408a12f79205 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -399,6 +399,13 @@ void omap2_check_revision(void);
399 399
400/* 400/*
401 * Runtime detection of OMAP3 features 401 * Runtime detection of OMAP3 features
402 *
403 * OMAP3_HAS_IO_CHAIN_CTRL: Some later members of the OMAP3 chip
404 * family have OS-level control over the I/O chain clock. This is
405 * to avoid a window during which wakeups could potentially be lost
406 * during powerdomain transitions. If this bit is set, it
407 * indicates that the chip does support OS-level control of this
408 * feature.
402 */ 409 */
403extern u32 omap_features; 410extern u32 omap_features;
404 411
@@ -410,9 +417,10 @@ extern u32 omap_features;
410#define OMAP3_HAS_192MHZ_CLK BIT(5) 417#define OMAP3_HAS_192MHZ_CLK BIT(5)
411#define OMAP3_HAS_IO_WAKEUP BIT(6) 418#define OMAP3_HAS_IO_WAKEUP BIT(6)
412#define OMAP3_HAS_SDRC BIT(7) 419#define OMAP3_HAS_SDRC BIT(7)
413#define OMAP4_HAS_MPU_1GHZ BIT(8) 420#define OMAP3_HAS_IO_CHAIN_CTRL BIT(8)
414#define OMAP4_HAS_MPU_1_2GHZ BIT(9) 421#define OMAP4_HAS_MPU_1GHZ BIT(9)
415#define OMAP4_HAS_MPU_1_5GHZ BIT(10) 422#define OMAP4_HAS_MPU_1_2GHZ BIT(10)
423#define OMAP4_HAS_MPU_1_5GHZ BIT(11)
416 424
417 425
418#define OMAP3_HAS_FEATURE(feat,flag) \ 426#define OMAP3_HAS_FEATURE(feat,flag) \
@@ -429,12 +437,11 @@ OMAP3_HAS_FEATURE(isp, ISP)
429OMAP3_HAS_FEATURE(192mhz_clk, 192MHZ_CLK) 437OMAP3_HAS_FEATURE(192mhz_clk, 192MHZ_CLK)
430OMAP3_HAS_FEATURE(io_wakeup, IO_WAKEUP) 438OMAP3_HAS_FEATURE(io_wakeup, IO_WAKEUP)
431OMAP3_HAS_FEATURE(sdrc, SDRC) 439OMAP3_HAS_FEATURE(sdrc, SDRC)
440OMAP3_HAS_FEATURE(io_chain_ctrl, IO_CHAIN_CTRL)
432 441
433/* 442/*
434 * Runtime detection of OMAP4 features 443 * Runtime detection of OMAP4 features
435 */ 444 */
436extern u32 omap_features;
437
438#define OMAP4_HAS_FEATURE(feat, flag) \ 445#define OMAP4_HAS_FEATURE(feat, flag) \
439static inline unsigned int omap4_has_ ##feat(void) \ 446static inline unsigned int omap4_has_ ##feat(void) \
440{ \ 447{ \
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index d11025e6e7a4..9418f00b6c38 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -104,7 +104,7 @@ struct dmtimer_platform_data {
104 104
105 bool loses_context; 105 bool loses_context;
106 106
107 u32 (*get_context_loss_count)(struct device *dev); 107 int (*get_context_loss_count)(struct device *dev);
108}; 108};
109 109
110struct omap_dm_timer *omap_dm_timer_request(void); 110struct omap_dm_timer *omap_dm_timer_request(void);
@@ -279,7 +279,7 @@ struct omap_dm_timer {
279 struct platform_device *pdev; 279 struct platform_device *pdev;
280 struct list_head node; 280 struct list_head node;
281 281
282 u32 (*get_context_loss_count)(struct device *dev); 282 int (*get_context_loss_count)(struct device *dev);
283}; 283};
284 284
285int omap_dm_timer_prepare(struct omap_dm_timer *timer); 285int omap_dm_timer_prepare(struct omap_dm_timer *timer);
diff --git a/arch/arm/plat-omap/include/plat/omap-alsa.h b/arch/arm/plat-omap/include/plat/omap-alsa.h
deleted file mode 100644
index b53055b390d0..000000000000
--- a/arch/arm/plat-omap/include/plat/omap-alsa.h
+++ /dev/null
@@ -1,123 +0,0 @@
1/*
2 * arch/arm/plat-omap/include/mach/omap-alsa.h
3 *
4 * Alsa Driver for AIC23 and TSC2101 codecs on OMAP platform boards.
5 *
6 * Copyright (C) 2006 Mika Laitio <lamikr@cc.jyu.fi>
7 *
8 * Copyright (C) 2005 Instituto Nokia de Tecnologia - INdT - Manaus Brazil
9 * Written by Daniel Petrini, David Cohen, Anderson Briglia
10 * {daniel.petrini, david.cohen, anderson.briglia}@indt.org.br
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
24 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * You should have received a copy of the GNU General Public License along
29 * with this program; if not, write to the Free Software Foundation, Inc.,
30 * 675 Mass Ave, Cambridge, MA 02139, USA.
31 *
32 * History
33 * -------
34 *
35 * 2005/07/25 INdT-10LE Kernel Team - Alsa driver for omap osk,
36 * original version based in sa1100 driver
37 * and omap oss driver.
38 */
39
40#ifndef __OMAP_ALSA_H
41#define __OMAP_ALSA_H
42
43#include <plat/dma.h>
44#include <sound/core.h>
45#include <sound/pcm.h>
46#include <plat/mcbsp.h>
47#include <linux/platform_device.h>
48
49#define DMA_BUF_SIZE (1024 * 8)
50
51/*
52 * Buffer management for alsa and dma
53 */
54struct audio_stream {
55 char *id; /* identification string */
56 int stream_id; /* numeric identification */
57 int dma_dev; /* dma number of that device */
58 int *lch; /* Chain of channels this stream is linked to */
59 char started; /* to store if the chain was started or not */
60 int dma_q_head; /* DMA Channel Q Head */
61 int dma_q_tail; /* DMA Channel Q Tail */
62 char dma_q_count; /* DMA Channel Q Count */
63 int active:1; /* we are using this stream for transfer now */
64 int period; /* current transfer period */
65 int periods; /* current count of periods registerd in the DMA engine */
66 spinlock_t dma_lock; /* for locking in DMA operations */
67 struct snd_pcm_substream *stream; /* the pcm stream */
68 unsigned linked:1; /* dma channels linked */
69 int offset; /* store start position of the last period in the alsa buffer */
70 int (*hw_start)(void); /* interface to start HW interface, e.g. McBSP */
71 int (*hw_stop)(void); /* interface to stop HW interface, e.g. McBSP */
72};
73
74/*
75 * Alsa card structure for aic23
76 */
77struct snd_card_omap_codec {
78 struct snd_card *card;
79 struct snd_pcm *pcm;
80 long samplerate;
81 struct audio_stream s[2]; /* playback & capture */
82};
83
84/* Codec specific information and function pointers.
85 * Codec (omap-alsa-aic23.c and omap-alsa-tsc2101.c)
86 * are responsible for defining the function pointers.
87 */
88struct omap_alsa_codec_config {
89 char *name;
90 struct omap_mcbsp_reg_cfg *mcbsp_regs_alsa;
91 struct snd_pcm_hw_constraint_list *hw_constraints_rates;
92 struct snd_pcm_hardware *snd_omap_alsa_playback;
93 struct snd_pcm_hardware *snd_omap_alsa_capture;
94 void (*codec_configure_dev)(void);
95 void (*codec_set_samplerate)(long);
96 void (*codec_clock_setup)(void);
97 int (*codec_clock_on)(void);
98 int (*codec_clock_off)(void);
99 int (*get_default_samplerate)(void);
100};
101
102/*********** Mixer function prototypes *************************/
103int snd_omap_mixer(struct snd_card_omap_codec *);
104void snd_omap_init_mixer(void);
105
106#ifdef CONFIG_PM
107void snd_omap_suspend_mixer(void);
108void snd_omap_resume_mixer(void);
109#endif
110
111int snd_omap_alsa_post_probe(struct platform_device *pdev, struct omap_alsa_codec_config *config);
112int snd_omap_alsa_remove(struct platform_device *pdev);
113#ifdef CONFIG_PM
114int snd_omap_alsa_suspend(struct platform_device *pdev, pm_message_t state);
115int snd_omap_alsa_resume(struct platform_device *pdev);
116#else
117#define snd_omap_alsa_suspend NULL
118#define snd_omap_alsa_resume NULL
119#endif
120
121void callback_omap_alsa_sound_dma(void *);
122
123#endif
diff --git a/arch/arm/plat-omap/include/plat/omap-pm.h b/arch/arm/plat-omap/include/plat/omap-pm.h
index 0840df813f4f..67faa7b8fe92 100644
--- a/arch/arm/plat-omap/include/plat/omap-pm.h
+++ b/arch/arm/plat-omap/include/plat/omap-pm.h
@@ -342,9 +342,9 @@ unsigned long omap_pm_cpu_get_freq(void);
342 * driver must restore device context. If the number of context losses 342 * driver must restore device context. If the number of context losses
343 * exceeds the maximum positive integer, the function will wrap to 0 and 343 * exceeds the maximum positive integer, the function will wrap to 0 and
344 * continue counting. Returns the number of context losses for this device, 344 * continue counting. Returns the number of context losses for this device,
345 * or zero upon error. 345 * or negative value upon error.
346 */ 346 */
347u32 omap_pm_get_dev_context_loss_count(struct device *dev); 347int omap_pm_get_dev_context_loss_count(struct device *dev);
348 348
349void omap_pm_enable_off_mode(void); 349void omap_pm_enable_off_mode(void);
350void omap_pm_disable_off_mode(void); 350void omap_pm_disable_off_mode(void);
diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h
index 12c5b0c345bf..51423d2727a5 100644
--- a/arch/arm/plat-omap/include/plat/omap_device.h
+++ b/arch/arm/plat-omap/include/plat/omap_device.h
@@ -107,7 +107,7 @@ struct device *omap_device_get_by_hwmod_name(const char *oh_name);
107int omap_device_align_pm_lat(struct platform_device *pdev, 107int omap_device_align_pm_lat(struct platform_device *pdev,
108 u32 new_wakeup_lat_limit); 108 u32 new_wakeup_lat_limit);
109struct powerdomain *omap_device_get_pwrdm(struct omap_device *od); 109struct powerdomain *omap_device_get_pwrdm(struct omap_device *od);
110u32 omap_device_get_context_loss_count(struct platform_device *pdev); 110int omap_device_get_context_loss_count(struct platform_device *pdev);
111 111
112/* Other */ 112/* Other */
113 113
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 5419f1a2aaa4..8b372ede17c1 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -600,7 +600,7 @@ int omap_hwmod_for_each_by_class(const char *classname,
600 void *user); 600 void *user);
601 601
602int omap_hwmod_set_postsetup_state(struct omap_hwmod *oh, u8 state); 602int omap_hwmod_set_postsetup_state(struct omap_hwmod *oh, u8 state);
603u32 omap_hwmod_get_context_loss_count(struct omap_hwmod *oh); 603int omap_hwmod_get_context_loss_count(struct omap_hwmod *oh);
604 604
605int omap_hwmod_no_setup_reset(struct omap_hwmod *oh); 605int omap_hwmod_no_setup_reset(struct omap_hwmod *oh);
606 606
diff --git a/arch/arm/plat-omap/omap-pm-noop.c b/arch/arm/plat-omap/omap-pm-noop.c
index b0471bb2d47d..3dc3801aace4 100644
--- a/arch/arm/plat-omap/omap-pm-noop.c
+++ b/arch/arm/plat-omap/omap-pm-noop.c
@@ -27,7 +27,7 @@
27#include <plat/omap_device.h> 27#include <plat/omap_device.h>
28 28
29static bool off_mode_enabled; 29static bool off_mode_enabled;
30static u32 dummy_context_loss_counter; 30static int dummy_context_loss_counter;
31 31
32/* 32/*
33 * Device-driver-originated constraints (via board-*.c files) 33 * Device-driver-originated constraints (via board-*.c files)
@@ -311,22 +311,32 @@ void omap_pm_disable_off_mode(void)
311 311
312#ifdef CONFIG_ARCH_OMAP2PLUS 312#ifdef CONFIG_ARCH_OMAP2PLUS
313 313
314u32 omap_pm_get_dev_context_loss_count(struct device *dev) 314int omap_pm_get_dev_context_loss_count(struct device *dev)
315{ 315{
316 struct platform_device *pdev = to_platform_device(dev); 316 struct platform_device *pdev = to_platform_device(dev);
317 u32 count; 317 int count;
318 318
319 if (WARN_ON(!dev)) 319 if (WARN_ON(!dev))
320 return 0; 320 return -ENODEV;
321 321
322 if (dev->parent == &omap_device_parent) { 322 if (dev->parent == &omap_device_parent) {
323 count = omap_device_get_context_loss_count(pdev); 323 count = omap_device_get_context_loss_count(pdev);
324 } else { 324 } else {
325 WARN_ONCE(off_mode_enabled, "omap_pm: using dummy context loss counter; device %s should be converted to omap_device", 325 WARN_ONCE(off_mode_enabled, "omap_pm: using dummy context loss counter; device %s should be converted to omap_device",
326 dev_name(dev)); 326 dev_name(dev));
327 if (off_mode_enabled) 327
328 dummy_context_loss_counter++;
329 count = dummy_context_loss_counter; 328 count = dummy_context_loss_counter;
329
330 if (off_mode_enabled) {
331 count++;
332 /*
333 * Context loss count has to be a non-negative value.
334 * Clear the sign bit to get a value range from 0 to
335 * INT_MAX.
336 */
337 count &= INT_MAX;
338 dummy_context_loss_counter = count;
339 }
330 } 340 }
331 341
332 pr_debug("OMAP PM: context loss count for dev %s = %d\n", 342 pr_debug("OMAP PM: context loss count for dev %s = %d\n",
@@ -337,7 +347,7 @@ u32 omap_pm_get_dev_context_loss_count(struct device *dev)
337 347
338#else 348#else
339 349
340u32 omap_pm_get_dev_context_loss_count(struct device *dev) 350int omap_pm_get_dev_context_loss_count(struct device *dev)
341{ 351{
342 return dummy_context_loss_counter; 352 return dummy_context_loss_counter;
343} 353}
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index cd90bedd9306..e8d98693d2dd 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -78,6 +78,7 @@
78#undef DEBUG 78#undef DEBUG
79 79
80#include <linux/kernel.h> 80#include <linux/kernel.h>
81#include <linux/export.h>
81#include <linux/platform_device.h> 82#include <linux/platform_device.h>
82#include <linux/slab.h> 83#include <linux/slab.h>
83#include <linux/err.h> 84#include <linux/err.h>
@@ -426,7 +427,7 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
426 * return the context loss counter for that hwmod, otherwise return 427 * return the context loss counter for that hwmod, otherwise return
427 * zero. 428 * zero.
428 */ 429 */
429u32 omap_device_get_context_loss_count(struct platform_device *pdev) 430int omap_device_get_context_loss_count(struct platform_device *pdev)
430{ 431{
431 struct omap_device *od; 432 struct omap_device *od;
432 u32 ret = 0; 433 u32 ret = 0;
diff --git a/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h b/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h
index 442301fe48b4..c42f39f20195 100644
--- a/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h
+++ b/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h
@@ -41,6 +41,19 @@ struct pxa3xx_nand_flash {
41 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */ 41 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
42}; 42};
43 43
44/*
45 * Current pxa3xx_nand controller has two chip select which
46 * both be workable.
47 *
48 * Notice should be taken that:
49 * When you want to use this feature, you should not enable the
50 * keep configuration feature, for two chip select could be
51 * attached with different nand chip. The different page size
52 * and timing requirement make the keep configuration impossible.
53 */
54
55/* The max num of chip select current support */
56#define NUM_CHIP_SELECT (2)
44struct pxa3xx_nand_platform_data { 57struct pxa3xx_nand_platform_data {
45 58
46 /* the data flash bus is shared between the Static Memory 59 /* the data flash bus is shared between the Static Memory
@@ -52,8 +65,11 @@ struct pxa3xx_nand_platform_data {
52 /* allow platform code to keep OBM/bootloader defined NFC config */ 65 /* allow platform code to keep OBM/bootloader defined NFC config */
53 int keep_config; 66 int keep_config;
54 67
55 const struct mtd_partition *parts; 68 /* indicate how many chip selects will be used */
56 unsigned int nr_parts; 69 int num_cs;
70
71 const struct mtd_partition *parts[NUM_CHIP_SELECT];
72 unsigned int nr_parts[NUM_CHIP_SELECT];
57 73
58 const struct pxa3xx_nand_flash * flash; 74 const struct pxa3xx_nand_flash * flash;
59 size_t num_flash; 75 size_t num_flash;
diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c
index 6e3d9abc9e2e..93a994a5dd8f 100644
--- a/arch/arm/plat-samsung/dma-ops.c
+++ b/arch/arm/plat-samsung/dma-ops.c
@@ -14,6 +14,7 @@
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/amba/pl330.h> 15#include <linux/amba/pl330.h>
16#include <linux/scatterlist.h> 16#include <linux/scatterlist.h>
17#include <linux/export.h>
17 18
18#include <mach/dma.h> 19#include <mach/dma.h>
19 20
diff --git a/arch/arm/plat-samsung/s3c-dma-ops.c b/arch/arm/plat-samsung/s3c-dma-ops.c
index 582333c70585..781494912827 100644
--- a/arch/arm/plat-samsung/s3c-dma-ops.c
+++ b/arch/arm/plat-samsung/s3c-dma-ops.c
@@ -14,6 +14,7 @@
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/export.h>
17 18
18#include <mach/dma.h> 19#include <mach/dma.h>
19 20
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c
index fafed4c38fd2..7c756fb189f7 100644
--- a/arch/avr32/boards/atngw100/setup.c
+++ b/arch/avr32/boards/atngw100/setup.c
@@ -90,11 +90,6 @@ static struct mtd_partition nand_partitions[] = {
90 }, 90 },
91}; 91};
92 92
93static struct mtd_partition *nand_part_info(int size, int *num_partitions)
94{
95 *num_partitions = ARRAY_SIZE(nand_partitions);
96 return nand_partitions;
97}
98 93
99static struct atmel_nand_data atngw100mkii_nand_data __initdata = { 94static struct atmel_nand_data atngw100mkii_nand_data __initdata = {
100 .cle = 21, 95 .cle = 21,
@@ -102,7 +97,8 @@ static struct atmel_nand_data atngw100mkii_nand_data __initdata = {
102 .rdy_pin = GPIO_PIN_PB(28), 97 .rdy_pin = GPIO_PIN_PB(28),
103 .enable_pin = GPIO_PIN_PE(23), 98 .enable_pin = GPIO_PIN_PE(23),
104 .bus_width_16 = true, 99 .bus_width_16 = true,
105 .partition_info = nand_part_info, 100 .parts = nand_partitions,
101 .num_parts = ARRAY_SIZE(nand_partitions),
106}; 102};
107#endif 103#endif
108 104
@@ -113,7 +109,7 @@ struct eth_addr {
113 u8 addr[6]; 109 u8 addr[6];
114}; 110};
115static struct eth_addr __initdata hw_addr[2]; 111static struct eth_addr __initdata hw_addr[2];
116static struct eth_platform_data __initdata eth_data[2]; 112static struct macb_platform_data __initdata eth_data[2];
117 113
118static struct spi_board_info spi0_board_info[] __initdata = { 114static struct spi_board_info spi0_board_info[] __initdata = {
119 { 115 {
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index 6ce30fb2ec94..c56ddac85d61 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -90,18 +90,13 @@ static struct mtd_partition nand_partitions[] = {
90 }, 90 },
91}; 91};
92 92
93static struct mtd_partition *nand_part_info(int size, int *num_partitions)
94{
95 *num_partitions = ARRAY_SIZE(nand_partitions);
96 return nand_partitions;
97}
98
99static struct atmel_nand_data atstk1006_nand_data __initdata = { 93static struct atmel_nand_data atstk1006_nand_data __initdata = {
100 .cle = 21, 94 .cle = 21,
101 .ale = 22, 95 .ale = 22,
102 .rdy_pin = GPIO_PIN_PB(30), 96 .rdy_pin = GPIO_PIN_PB(30),
103 .enable_pin = GPIO_PIN_PB(29), 97 .enable_pin = GPIO_PIN_PB(29),
104 .partition_info = nand_part_info, 98 .parts = nand_partitions,
99 .num_parts = ARRAY_SIZE(num_partitions),
105}; 100};
106#endif 101#endif
107 102
@@ -110,7 +105,7 @@ struct eth_addr {
110}; 105};
111 106
112static struct eth_addr __initdata hw_addr[2]; 107static struct eth_addr __initdata hw_addr[2];
113static struct eth_platform_data __initdata eth_data[2] = { 108static struct macb_platform_data __initdata eth_data[2] = {
114 { 109 {
115 /* 110 /*
116 * The MDIO pullups on STK1000 are a bit too weak for 111 * The MDIO pullups on STK1000 are a bit too weak for
diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c
index 86fab77a5a00..27bd6fbe21cb 100644
--- a/arch/avr32/boards/favr-32/setup.c
+++ b/arch/avr32/boards/favr-32/setup.c
@@ -50,7 +50,7 @@ struct eth_addr {
50 u8 addr[6]; 50 u8 addr[6];
51}; 51};
52static struct eth_addr __initdata hw_addr[1]; 52static struct eth_addr __initdata hw_addr[1];
53static struct eth_platform_data __initdata eth_data[1] = { 53static struct macb_platform_data __initdata eth_data[1] = {
54 { 54 {
55 .phy_mask = ~(1U << 1), 55 .phy_mask = ~(1U << 1),
56 }, 56 },
diff --git a/arch/avr32/boards/hammerhead/setup.c b/arch/avr32/boards/hammerhead/setup.c
index da14fbdd4e8e..9d1efd1cd425 100644
--- a/arch/avr32/boards/hammerhead/setup.c
+++ b/arch/avr32/boards/hammerhead/setup.c
@@ -102,7 +102,7 @@ struct eth_addr {
102}; 102};
103 103
104static struct eth_addr __initdata hw_addr[1]; 104static struct eth_addr __initdata hw_addr[1];
105static struct eth_platform_data __initdata eth_data[1]; 105static struct macb_platform_data __initdata eth_data[1];
106 106
107/* 107/*
108 * The next two functions should go away as the boot loader is 108 * The next two functions should go away as the boot loader is
diff --git a/arch/avr32/boards/merisc/setup.c b/arch/avr32/boards/merisc/setup.c
index e61bc948f959..ed137e335796 100644
--- a/arch/avr32/boards/merisc/setup.c
+++ b/arch/avr32/boards/merisc/setup.c
@@ -52,7 +52,7 @@ struct eth_addr {
52}; 52};
53 53
54static struct eth_addr __initdata hw_addr[2]; 54static struct eth_addr __initdata hw_addr[2];
55static struct eth_platform_data __initdata eth_data[2]; 55static struct macb_platform_data __initdata eth_data[2];
56 56
57static int ads7846_get_pendown_state_PB26(void) 57static int ads7846_get_pendown_state_PB26(void)
58{ 58{
diff --git a/arch/avr32/boards/mimc200/setup.c b/arch/avr32/boards/mimc200/setup.c
index c4da5cba2dbf..05358aa5ef7d 100644
--- a/arch/avr32/boards/mimc200/setup.c
+++ b/arch/avr32/boards/mimc200/setup.c
@@ -86,7 +86,7 @@ struct eth_addr {
86 u8 addr[6]; 86 u8 addr[6];
87}; 87};
88static struct eth_addr __initdata hw_addr[2]; 88static struct eth_addr __initdata hw_addr[2];
89static struct eth_platform_data __initdata eth_data[2]; 89static struct macb_platform_data __initdata eth_data[2];
90 90
91static struct spi_eeprom eeprom_25lc010 = { 91static struct spi_eeprom eeprom_25lc010 = {
92 .name = "25lc010", 92 .name = "25lc010",
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 7fbf0dcb9afe..402a7bb72669 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1067,7 +1067,7 @@ void __init at32_setup_serial_console(unsigned int usart_id)
1067 * -------------------------------------------------------------------- */ 1067 * -------------------------------------------------------------------- */
1068 1068
1069#ifdef CONFIG_CPU_AT32AP7000 1069#ifdef CONFIG_CPU_AT32AP7000
1070static struct eth_platform_data macb0_data; 1070static struct macb_platform_data macb0_data;
1071static struct resource macb0_resource[] = { 1071static struct resource macb0_resource[] = {
1072 PBMEM(0xfff01800), 1072 PBMEM(0xfff01800),
1073 IRQ(25), 1073 IRQ(25),
@@ -1076,7 +1076,7 @@ DEFINE_DEV_DATA(macb, 0);
1076DEV_CLK(hclk, macb0, hsb, 8); 1076DEV_CLK(hclk, macb0, hsb, 8);
1077DEV_CLK(pclk, macb0, pbb, 6); 1077DEV_CLK(pclk, macb0, pbb, 6);
1078 1078
1079static struct eth_platform_data macb1_data; 1079static struct macb_platform_data macb1_data;
1080static struct resource macb1_resource[] = { 1080static struct resource macb1_resource[] = {
1081 PBMEM(0xfff01c00), 1081 PBMEM(0xfff01c00),
1082 IRQ(26), 1082 IRQ(26),
@@ -1086,7 +1086,7 @@ DEV_CLK(hclk, macb1, hsb, 9);
1086DEV_CLK(pclk, macb1, pbb, 7); 1086DEV_CLK(pclk, macb1, pbb, 7);
1087 1087
1088struct platform_device *__init 1088struct platform_device *__init
1089at32_add_device_eth(unsigned int id, struct eth_platform_data *data) 1089at32_add_device_eth(unsigned int id, struct macb_platform_data *data)
1090{ 1090{
1091 struct platform_device *pdev; 1091 struct platform_device *pdev;
1092 u32 pin_mask; 1092 u32 pin_mask;
@@ -1163,7 +1163,7 @@ at32_add_device_eth(unsigned int id, struct eth_platform_data *data)
1163 return NULL; 1163 return NULL;
1164 } 1164 }
1165 1165
1166 memcpy(pdev->dev.platform_data, data, sizeof(struct eth_platform_data)); 1166 memcpy(pdev->dev.platform_data, data, sizeof(struct macb_platform_data));
1167 platform_device_register(pdev); 1167 platform_device_register(pdev);
1168 1168
1169 return pdev; 1169 return pdev;
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index 679458d9a622..67b111ce332d 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -6,6 +6,7 @@
6 6
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/serial.h> 8#include <linux/serial.h>
9#include <linux/platform_data/macb.h>
9 10
10#define GPIO_PIN_NONE (-1) 11#define GPIO_PIN_NONE (-1)
11 12
@@ -42,12 +43,8 @@ struct atmel_uart_data {
42void at32_map_usart(unsigned int hw_id, unsigned int line, int flags); 43void at32_map_usart(unsigned int hw_id, unsigned int line, int flags);
43struct platform_device *at32_add_device_usart(unsigned int id); 44struct platform_device *at32_add_device_usart(unsigned int id);
44 45
45struct eth_platform_data {
46 u32 phy_mask;
47 u8 is_rmii;
48};
49struct platform_device * 46struct platform_device *
50at32_add_device_eth(unsigned int id, struct eth_platform_data *data); 47at32_add_device_eth(unsigned int id, struct macb_platform_data *data);
51 48
52struct spi_board_info; 49struct spi_board_info;
53struct platform_device * 50struct platform_device *
@@ -128,7 +125,8 @@ struct atmel_nand_data {
128 u8 ale; /* address line number connected to ALE */ 125 u8 ale; /* address line number connected to ALE */
129 u8 cle; /* address line number connected to CLE */ 126 u8 cle; /* address line number connected to CLE */
130 u8 bus_width_16; /* buswidth is 16 bit */ 127 u8 bus_width_16; /* buswidth is 16 bit */
131 struct mtd_partition *(*partition_info)(int size, int *num_partitions); 128 struct mtd_partition *parts;
129 unsigned int num_parts;
132}; 130};
133struct platform_device * 131struct platform_device *
134at32_add_device_nand(unsigned int id, struct atmel_nand_data *data); 132at32_add_device_nand(unsigned int id, struct atmel_nand_data *data);
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h
index 7fd0ec7b5b0f..ecacdf34768b 100644
--- a/arch/blackfin/include/asm/bfin_serial.h
+++ b/arch/blackfin/include/asm/bfin_serial.h
@@ -32,6 +32,8 @@ struct work_struct;
32struct bfin_serial_port { 32struct bfin_serial_port {
33 struct uart_port port; 33 struct uart_port port;
34 unsigned int old_status; 34 unsigned int old_status;
35 int tx_irq;
36 int rx_irq;
35 int status_irq; 37 int status_irq;
36#ifndef BFIN_UART_BF54X_STYLE 38#ifndef BFIN_UART_BF54X_STYLE
37 unsigned int lsr; 39 unsigned int lsr;
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index 1082e49f7a9f..d1c0c0cff3ef 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -373,8 +373,13 @@ static struct resource bfin_uart0_resources[] = {
373 .flags = IORESOURCE_MEM, 373 .flags = IORESOURCE_MEM,
374 }, 374 },
375 { 375 {
376 .start = IRQ_UART0_TX,
377 .end = IRQ_UART0_TX,
378 .flags = IORESOURCE_IRQ,
379 },
380 {
376 .start = IRQ_UART0_RX, 381 .start = IRQ_UART0_RX,
377 .end = IRQ_UART0_RX+1, 382 .end = IRQ_UART0_RX,
378 .flags = IORESOURCE_IRQ, 383 .flags = IORESOURCE_IRQ,
379 }, 384 },
380 { 385 {
@@ -416,8 +421,13 @@ static struct resource bfin_uart1_resources[] = {
416 .flags = IORESOURCE_MEM, 421 .flags = IORESOURCE_MEM,
417 }, 422 },
418 { 423 {
424 .start = IRQ_UART1_TX,
425 .end = IRQ_UART1_TX,
426 .flags = IORESOURCE_IRQ,
427 },
428 {
419 .start = IRQ_UART1_RX, 429 .start = IRQ_UART1_RX,
420 .end = IRQ_UART1_RX+1, 430 .end = IRQ_UART1_RX,
421 .flags = IORESOURCE_IRQ, 431 .flags = IORESOURCE_IRQ,
422 }, 432 },
423 { 433 {
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
index 55c127908815..5470bf89e52e 100644
--- a/arch/blackfin/mach-bf518/boards/tcm-bf518.c
+++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
@@ -309,8 +309,13 @@ static struct resource bfin_uart0_resources[] = {
309 .flags = IORESOURCE_MEM, 309 .flags = IORESOURCE_MEM,
310 }, 310 },
311 { 311 {
312 .start = IRQ_UART0_TX,
313 .end = IRQ_UART0_TX,
314 .flags = IORESOURCE_IRQ,
315 },
316 {
312 .start = IRQ_UART0_RX, 317 .start = IRQ_UART0_RX,
313 .end = IRQ_UART0_RX+1, 318 .end = IRQ_UART0_RX,
314 .flags = IORESOURCE_IRQ, 319 .flags = IORESOURCE_IRQ,
315 }, 320 },
316 { 321 {
@@ -352,8 +357,13 @@ static struct resource bfin_uart1_resources[] = {
352 .flags = IORESOURCE_MEM, 357 .flags = IORESOURCE_MEM,
353 }, 358 },
354 { 359 {
360 .start = IRQ_UART1_TX,
361 .end = IRQ_UART1_TX,
362 .flags = IORESOURCE_IRQ,
363 },
364 {
355 .start = IRQ_UART1_RX, 365 .start = IRQ_UART1_RX,
356 .end = IRQ_UART1_RX+1, 366 .end = IRQ_UART1_RX,
357 .flags = IORESOURCE_IRQ, 367 .flags = IORESOURCE_IRQ,
358 }, 368 },
359 { 369 {
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index 8d65d476f118..5bc6938157ad 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -381,8 +381,13 @@ static struct resource bfin_uart0_resources[] = {
381 .flags = IORESOURCE_MEM, 381 .flags = IORESOURCE_MEM,
382 }, 382 },
383 { 383 {
384 .start = IRQ_UART0_TX,
385 .end = IRQ_UART0_TX,
386 .flags = IORESOURCE_IRQ,
387 },
388 {
384 .start = IRQ_UART0_RX, 389 .start = IRQ_UART0_RX,
385 .end = IRQ_UART0_RX+1, 390 .end = IRQ_UART0_RX,
386 .flags = IORESOURCE_IRQ, 391 .flags = IORESOURCE_IRQ,
387 }, 392 },
388 { 393 {
@@ -424,8 +429,13 @@ static struct resource bfin_uart1_resources[] = {
424 .flags = IORESOURCE_MEM, 429 .flags = IORESOURCE_MEM,
425 }, 430 },
426 { 431 {
432 .start = IRQ_UART1_TX,
433 .end = IRQ_UART1_TX,
434 .flags = IORESOURCE_IRQ,
435 },
436 {
427 .start = IRQ_UART1_RX, 437 .start = IRQ_UART1_RX,
428 .end = IRQ_UART1_RX+1, 438 .end = IRQ_UART1_RX,
429 .flags = IORESOURCE_IRQ, 439 .flags = IORESOURCE_IRQ,
430 }, 440 },
431 { 441 {
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 6410fc1af8ed..cd289698b4dd 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/device.h> 10#include <linux/device.h>
11#include <linux/export.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
12#include <linux/mtd/mtd.h> 13#include <linux/mtd/mtd.h>
13#include <linux/mtd/partitions.h> 14#include <linux/mtd/partitions.h>
@@ -539,8 +540,13 @@ static struct resource bfin_uart0_resources[] = {
539 .flags = IORESOURCE_MEM, 540 .flags = IORESOURCE_MEM,
540 }, 541 },
541 { 542 {
543 .start = IRQ_UART0_TX,
544 .end = IRQ_UART0_TX,
545 .flags = IORESOURCE_IRQ,
546 },
547 {
542 .start = IRQ_UART0_RX, 548 .start = IRQ_UART0_RX,
543 .end = IRQ_UART0_RX+1, 549 .end = IRQ_UART0_RX,
544 .flags = IORESOURCE_IRQ, 550 .flags = IORESOURCE_IRQ,
545 }, 551 },
546 { 552 {
@@ -582,8 +588,13 @@ static struct resource bfin_uart1_resources[] = {
582 .flags = IORESOURCE_MEM, 588 .flags = IORESOURCE_MEM,
583 }, 589 },
584 { 590 {
591 .start = IRQ_UART1_TX,
592 .end = IRQ_UART1_TX,
593 .flags = IORESOURCE_IRQ,
594 },
595 {
585 .start = IRQ_UART1_RX, 596 .start = IRQ_UART1_RX,
586 .end = IRQ_UART1_RX+1, 597 .end = IRQ_UART1_RX,
587 .flags = IORESOURCE_IRQ, 598 .flags = IORESOURCE_IRQ,
588 }, 599 },
589 { 600 {
@@ -801,7 +812,6 @@ static struct platform_device bfin_sport1_uart_device = {
801#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 812#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
802#include <linux/input.h> 813#include <linux/input.h>
803#include <linux/gpio_keys.h> 814#include <linux/gpio_keys.h>
804#include <linux/export.h>
805 815
806static struct gpio_keys_button bfin_gpio_keys_table[] = { 816static struct gpio_keys_button bfin_gpio_keys_table[] = {
807 {BTN_0, GPIO_PF14, 1, "gpio-keys: BTN0"}, 817 {BTN_0, GPIO_PF14, 1, "gpio-keys: BTN0"},
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 64f7278aba53..9f792eafd1cc 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/export.h>
10#include <linux/platform_device.h> 11#include <linux/platform_device.h>
11#include <linux/mtd/mtd.h> 12#include <linux/mtd/mtd.h>
12#include <linux/mtd/partitions.h> 13#include <linux/mtd/partitions.h>
@@ -417,8 +418,13 @@ static struct resource bfin_uart0_resources[] = {
417 .flags = IORESOURCE_MEM, 418 .flags = IORESOURCE_MEM,
418 }, 419 },
419 { 420 {
421 .start = IRQ_UART0_TX,
422 .end = IRQ_UART0_TX,
423 .flags = IORESOURCE_IRQ,
424 },
425 {
420 .start = IRQ_UART0_RX, 426 .start = IRQ_UART0_RX,
421 .end = IRQ_UART0_RX+1, 427 .end = IRQ_UART0_RX,
422 .flags = IORESOURCE_IRQ, 428 .flags = IORESOURCE_IRQ,
423 }, 429 },
424 { 430 {
@@ -460,8 +466,13 @@ static struct resource bfin_uart1_resources[] = {
460 .flags = IORESOURCE_MEM, 466 .flags = IORESOURCE_MEM,
461 }, 467 },
462 { 468 {
469 .start = IRQ_UART1_TX,
470 .end = IRQ_UART1_TX,
471 .flags = IORESOURCE_IRQ,
472 },
473 {
463 .start = IRQ_UART1_RX, 474 .start = IRQ_UART1_RX,
464 .end = IRQ_UART1_RX+1, 475 .end = IRQ_UART1_RX,
465 .flags = IORESOURCE_IRQ, 476 .flags = IORESOURCE_IRQ,
466 }, 477 },
467 { 478 {
@@ -674,7 +685,6 @@ static struct platform_device bfin_sport1_uart_device = {
674#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 685#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
675#include <linux/input.h> 686#include <linux/input.h>
676#include <linux/gpio_keys.h> 687#include <linux/gpio_keys.h>
677#include <linux/export.h>
678 688
679static struct gpio_keys_button bfin_gpio_keys_table[] = { 689static struct gpio_keys_button bfin_gpio_keys_table[] = {
680 {BTN_0, GPIO_PG0, 1, "gpio-keys: BTN0"}, 690 {BTN_0, GPIO_PG0, 1, "gpio-keys: BTN0"},
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index e4c6a122b66c..3ecafff5d2ef 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -711,8 +711,13 @@ static struct resource bfin_uart0_resources[] = {
711 .flags = IORESOURCE_MEM, 711 .flags = IORESOURCE_MEM,
712 }, 712 },
713 { 713 {
714 .start = IRQ_UART0_TX,
715 .end = IRQ_UART0_TX,
716 .flags = IORESOURCE_IRQ,
717 },
718 {
714 .start = IRQ_UART0_RX, 719 .start = IRQ_UART0_RX,
715 .end = IRQ_UART0_RX+1, 720 .end = IRQ_UART0_RX,
716 .flags = IORESOURCE_IRQ, 721 .flags = IORESOURCE_IRQ,
717 }, 722 },
718 { 723 {
@@ -754,8 +759,13 @@ static struct resource bfin_uart1_resources[] = {
754 .flags = IORESOURCE_MEM, 759 .flags = IORESOURCE_MEM,
755 }, 760 },
756 { 761 {
762 .start = IRQ_UART1_TX,
763 .end = IRQ_UART1_TX,
764 .flags = IORESOURCE_IRQ,
765 },
766 {
757 .start = IRQ_UART1_RX, 767 .start = IRQ_UART1_RX,
758 .end = IRQ_UART1_RX+1, 768 .end = IRQ_UART1_RX,
759 .flags = IORESOURCE_IRQ, 769 .flags = IORESOURCE_IRQ,
760 }, 770 },
761 { 771 {
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index 76dbc03a8d4d..3a92c4318d2d 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -496,8 +496,13 @@ static struct resource bfin_uart0_resources[] = {
496 .flags = IORESOURCE_MEM, 496 .flags = IORESOURCE_MEM,
497 }, 497 },
498 { 498 {
499 .start = IRQ_UART0_TX,
500 .end = IRQ_UART0_TX,
501 .flags = IORESOURCE_IRQ,
502 },
503 {
499 .start = IRQ_UART0_RX, 504 .start = IRQ_UART0_RX,
500 .end = IRQ_UART0_RX+1, 505 .end = IRQ_UART0_RX,
501 .flags = IORESOURCE_IRQ, 506 .flags = IORESOURCE_IRQ,
502 }, 507 },
503 { 508 {
@@ -540,8 +545,13 @@ static struct resource bfin_uart1_resources[] = {
540 .flags = IORESOURCE_MEM, 545 .flags = IORESOURCE_MEM,
541 }, 546 },
542 { 547 {
548 .start = IRQ_UART1_TX,
549 .end = IRQ_UART1_TX,
550 .flags = IORESOURCE_IRQ,
551 },
552 {
543 .start = IRQ_UART1_RX, 553 .start = IRQ_UART1_RX,
544 .end = IRQ_UART1_RX+1, 554 .end = IRQ_UART1_RX,
545 .flags = IORESOURCE_IRQ, 555 .flags = IORESOURCE_IRQ,
546 }, 556 },
547 { 557 {
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 5da5787fc4ef..47cadd316e76 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -238,8 +238,13 @@ static struct resource bfin_uart0_resources[] = {
238 .flags = IORESOURCE_MEM, 238 .flags = IORESOURCE_MEM,
239 }, 239 },
240 { 240 {
241 .start = IRQ_UART0_TX,
242 .end = IRQ_UART0_TX,
243 .flags = IORESOURCE_IRQ,
244 },
245 {
241 .start = IRQ_UART0_RX, 246 .start = IRQ_UART0_RX,
242 .end = IRQ_UART0_RX + 1, 247 .end = IRQ_UART0_RX,
243 .flags = IORESOURCE_IRQ, 248 .flags = IORESOURCE_IRQ,
244 }, 249 },
245 { 250 {
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index b0ec825fb4ec..18817d57c7a1 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -193,8 +193,13 @@ static struct resource bfin_uart0_resources[] = {
193 .flags = IORESOURCE_MEM, 193 .flags = IORESOURCE_MEM,
194 }, 194 },
195 { 195 {
196 .start = IRQ_UART0_TX,
197 .end = IRQ_UART0_TX,
198 .flags = IORESOURCE_IRQ,
199 },
200 {
196 .start = IRQ_UART0_RX, 201 .start = IRQ_UART0_RX,
197 .end = IRQ_UART0_RX + 1, 202 .end = IRQ_UART0_RX,
198 .flags = IORESOURCE_IRQ, 203 .flags = IORESOURCE_IRQ,
199 }, 204 },
200 { 205 {
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index 14f54a31e74c..2c8f30ef6a7b 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -221,8 +221,13 @@ static struct resource bfin_uart0_resources[] = {
221 .flags = IORESOURCE_MEM, 221 .flags = IORESOURCE_MEM,
222 }, 222 },
223 { 223 {
224 .start = IRQ_UART0_TX,
225 .end = IRQ_UART0_TX,
226 .flags = IORESOURCE_IRQ,
227 },
228 {
224 .start = IRQ_UART0_RX, 229 .start = IRQ_UART0_RX,
225 .end = IRQ_UART0_RX + 1, 230 .end = IRQ_UART0_RX,
226 .flags = IORESOURCE_IRQ, 231 .flags = IORESOURCE_IRQ,
227 }, 232 },
228 { 233 {
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index ecd2801f050d..144556e14499 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -292,8 +292,13 @@ static struct resource bfin_uart0_resources[] = {
292 .flags = IORESOURCE_MEM, 292 .flags = IORESOURCE_MEM,
293 }, 293 },
294 { 294 {
295 .start = IRQ_UART0_TX,
296 .end = IRQ_UART0_TX,
297 .flags = IORESOURCE_IRQ,
298 },
299 {
295 .start = IRQ_UART0_RX, 300 .start = IRQ_UART0_RX,
296 .end = IRQ_UART0_RX + 1, 301 .end = IRQ_UART0_RX,
297 .flags = IORESOURCE_IRQ, 302 .flags = IORESOURCE_IRQ,
298 }, 303 },
299 { 304 {
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index fbee77fa9211..b597d4e50d58 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -151,8 +151,13 @@ static struct resource bfin_uart0_resources[] = {
151 .flags = IORESOURCE_MEM, 151 .flags = IORESOURCE_MEM,
152 }, 152 },
153 { 153 {
154 .start = IRQ_UART0_TX,
155 .end = IRQ_UART0_TX,
156 .flags = IORESOURCE_IRQ,
157 },
158 {
154 .start = IRQ_UART0_RX, 159 .start = IRQ_UART0_RX,
155 .end = IRQ_UART0_RX + 1, 160 .end = IRQ_UART0_RX,
156 .flags = IORESOURCE_IRQ, 161 .flags = IORESOURCE_IRQ,
157 }, 162 },
158 { 163 {
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 964a8e5f79b4..2afd02e14bd1 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -298,8 +298,13 @@ static struct resource bfin_uart0_resources[] = {
298 .flags = IORESOURCE_MEM, 298 .flags = IORESOURCE_MEM,
299 }, 299 },
300 { 300 {
301 .start = IRQ_UART0_TX,
302 .end = IRQ_UART0_TX,
303 .flags = IORESOURCE_IRQ,
304 },
305 {
301 .start = IRQ_UART0_RX, 306 .start = IRQ_UART0_RX,
302 .end = IRQ_UART0_RX + 1, 307 .end = IRQ_UART0_RX,
303 .flags = IORESOURCE_IRQ, 308 .flags = IORESOURCE_IRQ,
304 }, 309 },
305 { 310 {
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index 1471c51ea697..604a430038e1 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/device.h> 10#include <linux/device.h>
11#include <linux/export.h>
11#include <linux/etherdevice.h> 12#include <linux/etherdevice.h>
12#include <linux/platform_device.h> 13#include <linux/platform_device.h>
13#include <linux/mtd/mtd.h> 14#include <linux/mtd/mtd.h>
@@ -305,8 +306,13 @@ static struct resource bfin_uart0_resources[] = {
305 .flags = IORESOURCE_MEM, 306 .flags = IORESOURCE_MEM,
306 }, 307 },
307 { 308 {
309 .start = IRQ_UART0_TX,
310 .end = IRQ_UART0_TX,
311 .flags = IORESOURCE_IRQ,
312 },
313 {
308 .start = IRQ_UART0_RX, 314 .start = IRQ_UART0_RX,
309 .end = IRQ_UART0_RX+1, 315 .end = IRQ_UART0_RX,
310 .flags = IORESOURCE_IRQ, 316 .flags = IORESOURCE_IRQ,
311 }, 317 },
312 { 318 {
@@ -366,8 +372,13 @@ static struct resource bfin_uart1_resources[] = {
366 .flags = IORESOURCE_MEM, 372 .flags = IORESOURCE_MEM,
367 }, 373 },
368 { 374 {
375 .start = IRQ_UART1_TX,
376 .end = IRQ_UART1_TX,
377 .flags = IORESOURCE_IRQ,
378 },
379 {
369 .start = IRQ_UART1_RX, 380 .start = IRQ_UART1_RX,
370 .end = IRQ_UART1_RX+1, 381 .end = IRQ_UART1_RX,
371 .flags = IORESOURCE_IRQ, 382 .flags = IORESOURCE_IRQ,
372 }, 383 },
373 { 384 {
@@ -569,7 +580,6 @@ static struct platform_device bfin_sport1_uart_device = {
569 580
570#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 581#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
571#include <linux/bfin_mac.h> 582#include <linux/bfin_mac.h>
572#include <linux/export.h>
573static const unsigned short bfin_mac_peripherals[] = P_MII0; 583static const unsigned short bfin_mac_peripherals[] = P_MII0;
574 584
575static struct bfin_phydev_platform_data bfin_phydev_data[] = { 585static struct bfin_phydev_platform_data bfin_phydev_data[] = {
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index 47cf37de33ba..d916b46a44fe 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/device.h> 10#include <linux/device.h>
11#include <linux/etherdevice.h> 11#include <linux/etherdevice.h>
12#include <linux/export.h>
12#include <linux/platform_device.h> 13#include <linux/platform_device.h>
13#include <linux/mtd/mtd.h> 14#include <linux/mtd/mtd.h>
14#include <linux/mtd/partitions.h> 15#include <linux/mtd/partitions.h>
@@ -306,8 +307,13 @@ static struct resource bfin_uart0_resources[] = {
306 .flags = IORESOURCE_MEM, 307 .flags = IORESOURCE_MEM,
307 }, 308 },
308 { 309 {
310 .start = IRQ_UART0_TX,
311 .end = IRQ_UART0_TX,
312 .flags = IORESOURCE_IRQ,
313 },
314 {
309 .start = IRQ_UART0_RX, 315 .start = IRQ_UART0_RX,
310 .end = IRQ_UART0_RX+1, 316 .end = IRQ_UART0_RX,
311 .flags = IORESOURCE_IRQ, 317 .flags = IORESOURCE_IRQ,
312 }, 318 },
313 { 319 {
@@ -349,8 +355,13 @@ static struct resource bfin_uart1_resources[] = {
349 .flags = IORESOURCE_MEM, 355 .flags = IORESOURCE_MEM,
350 }, 356 },
351 { 357 {
358 .start = IRQ_UART1_TX,
359 .end = IRQ_UART1_TX,
360 .flags = IORESOURCE_IRQ,
361 },
362 {
352 .start = IRQ_UART1_RX, 363 .start = IRQ_UART1_RX,
353 .end = IRQ_UART1_RX+1, 364 .end = IRQ_UART1_RX,
354 .flags = IORESOURCE_IRQ, 365 .flags = IORESOURCE_IRQ,
355 }, 366 },
356 { 367 {
@@ -534,7 +545,6 @@ static struct platform_device bfin_sport1_uart_device = {
534 545
535#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 546#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
536#include <linux/bfin_mac.h> 547#include <linux/bfin_mac.h>
537#include <linux/export.h>
538static const unsigned short bfin_mac_peripherals[] = P_MII0; 548static const unsigned short bfin_mac_peripherals[] = P_MII0;
539 549
540static struct bfin_phydev_platform_data bfin_phydev_data[] = { 550static struct bfin_phydev_platform_data bfin_phydev_data[] = {
diff --git a/arch/blackfin/mach-bf537/boards/dnp5370.c b/arch/blackfin/mach-bf537/boards/dnp5370.c
index 33e69e427e98..5f307228be63 100644
--- a/arch/blackfin/mach-bf537/boards/dnp5370.c
+++ b/arch/blackfin/mach-bf537/boards/dnp5370.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/export.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
17#include <linux/io.h> 18#include <linux/io.h>
@@ -49,7 +50,6 @@ static struct platform_device rtc_device = {
49 50
50#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 51#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
51#include <linux/bfin_mac.h> 52#include <linux/bfin_mac.h>
52#include <linux/export.h>
53static const unsigned short bfin_mac_peripherals[] = P_RMII0; 53static const unsigned short bfin_mac_peripherals[] = P_RMII0;
54 54
55static struct bfin_phydev_platform_data bfin_phydev_data[] = { 55static struct bfin_phydev_platform_data bfin_phydev_data[] = {
@@ -237,8 +237,13 @@ static struct resource bfin_uart0_resources[] = {
237 .flags = IORESOURCE_MEM, 237 .flags = IORESOURCE_MEM,
238 }, 238 },
239 { 239 {
240 .start = IRQ_UART0_TX,
241 .end = IRQ_UART0_TX,
242 .flags = IORESOURCE_IRQ,
243 },
244 {
240 .start = IRQ_UART0_RX, 245 .start = IRQ_UART0_RX,
241 .end = IRQ_UART0_RX+1, 246 .end = IRQ_UART0_RX,
242 .flags = IORESOURCE_IRQ, 247 .flags = IORESOURCE_IRQ,
243 }, 248 },
244 { 249 {
@@ -281,8 +286,13 @@ static struct resource bfin_uart1_resources[] = {
281 .flags = IORESOURCE_MEM, 286 .flags = IORESOURCE_MEM,
282 }, 287 },
283 { 288 {
289 .start = IRQ_UART1_TX,
290 .end = IRQ_UART1_TX,
291 .flags = IORESOURCE_IRQ,
292 },
293 {
284 .start = IRQ_UART1_RX, 294 .start = IRQ_UART1_RX,
285 .end = IRQ_UART1_RX+1, 295 .end = IRQ_UART1_RX,
286 .flags = IORESOURCE_IRQ, 296 .flags = IORESOURCE_IRQ,
287 }, 297 },
288 { 298 {
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index c62f9dccd9f7..3901dd093b90 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -240,8 +240,13 @@ static struct resource bfin_uart0_resources[] = {
240 .flags = IORESOURCE_MEM, 240 .flags = IORESOURCE_MEM,
241 }, 241 },
242 { 242 {
243 .start = IRQ_UART0_TX,
244 .end = IRQ_UART0_TX,
245 .flags = IORESOURCE_IRQ,
246 },
247 {
243 .start = IRQ_UART0_RX, 248 .start = IRQ_UART0_RX,
244 .end = IRQ_UART0_RX+1, 249 .end = IRQ_UART0_RX,
245 .flags = IORESOURCE_IRQ, 250 .flags = IORESOURCE_IRQ,
246 }, 251 },
247 { 252 {
@@ -283,8 +288,13 @@ static struct resource bfin_uart1_resources[] = {
283 .flags = IORESOURCE_MEM, 288 .flags = IORESOURCE_MEM,
284 }, 289 },
285 { 290 {
291 .start = IRQ_UART1_TX,
292 .end = IRQ_UART1_TX,
293 .flags = IORESOURCE_IRQ,
294 },
295 {
286 .start = IRQ_UART1_RX, 296 .start = IRQ_UART1_RX,
287 .end = IRQ_UART1_RX+1, 297 .end = IRQ_UART1_RX,
288 .flags = IORESOURCE_IRQ, 298 .flags = IORESOURCE_IRQ,
289 }, 299 },
290 { 300 {
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 3099e91114fc..aebd31c845f0 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/etherdevice.h> 10#include <linux/etherdevice.h>
11#include <linux/export.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
12#include <linux/mtd/mtd.h> 13#include <linux/mtd/mtd.h>
13#include <linux/mtd/partitions.h> 14#include <linux/mtd/partitions.h>
@@ -309,8 +310,13 @@ static struct resource bfin_uart0_resources[] = {
309 .flags = IORESOURCE_MEM, 310 .flags = IORESOURCE_MEM,
310 }, 311 },
311 { 312 {
313 .start = IRQ_UART0_TX,
314 .end = IRQ_UART0_TX,
315 .flags = IORESOURCE_IRQ,
316 },
317 {
312 .start = IRQ_UART0_RX, 318 .start = IRQ_UART0_RX,
313 .end = IRQ_UART0_RX+1, 319 .end = IRQ_UART0_RX,
314 .flags = IORESOURCE_IRQ, 320 .flags = IORESOURCE_IRQ,
315 }, 321 },
316 { 322 {
@@ -352,8 +358,13 @@ static struct resource bfin_uart1_resources[] = {
352 .flags = IORESOURCE_MEM, 358 .flags = IORESOURCE_MEM,
353 }, 359 },
354 { 360 {
361 .start = IRQ_UART1_TX,
362 .end = IRQ_UART1_TX,
363 .flags = IORESOURCE_IRQ,
364 },
365 {
355 .start = IRQ_UART1_RX, 366 .start = IRQ_UART1_RX,
356 .end = IRQ_UART1_RX+1, 367 .end = IRQ_UART1_RX,
357 .flags = IORESOURCE_IRQ, 368 .flags = IORESOURCE_IRQ,
358 }, 369 },
359 { 370 {
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 27f955db9976..7fbb0bbf8676 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/export.h>
10#include <linux/kernel.h> 11#include <linux/kernel.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
12#include <linux/io.h> 13#include <linux/io.h>
@@ -1566,8 +1567,13 @@ static struct resource bfin_uart0_resources[] = {
1566 .flags = IORESOURCE_MEM, 1567 .flags = IORESOURCE_MEM,
1567 }, 1568 },
1568 { 1569 {
1570 .start = IRQ_UART0_TX,
1571 .end = IRQ_UART0_TX,
1572 .flags = IORESOURCE_IRQ,
1573 },
1574 {
1569 .start = IRQ_UART0_RX, 1575 .start = IRQ_UART0_RX,
1570 .end = IRQ_UART0_RX+1, 1576 .end = IRQ_UART0_RX,
1571 .flags = IORESOURCE_IRQ, 1577 .flags = IORESOURCE_IRQ,
1572 }, 1578 },
1573 { 1579 {
@@ -1621,8 +1627,13 @@ static struct resource bfin_uart1_resources[] = {
1621 .flags = IORESOURCE_MEM, 1627 .flags = IORESOURCE_MEM,
1622 }, 1628 },
1623 { 1629 {
1630 .start = IRQ_UART1_TX,
1631 .end = IRQ_UART1_TX,
1632 .flags = IORESOURCE_IRQ,
1633 },
1634 {
1624 .start = IRQ_UART1_RX, 1635 .start = IRQ_UART1_RX,
1625 .end = IRQ_UART1_RX+1, 1636 .end = IRQ_UART1_RX,
1626 .flags = IORESOURCE_IRQ, 1637 .flags = IORESOURCE_IRQ,
1627 }, 1638 },
1628 { 1639 {
@@ -1992,7 +2003,6 @@ static struct adp8870_backlight_platform_data adp8870_pdata = {
1992 2003
1993#if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE) 2004#if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE)
1994#include <linux/i2c/adp8860.h> 2005#include <linux/i2c/adp8860.h>
1995#include <linux/export.h>
1996static struct led_info adp8860_leds[] = { 2006static struct led_info adp8860_leds[] = {
1997 { 2007 {
1998 .name = "adp8860-led7", 2008 .name = "adp8860-led7",
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 841803038d6f..6917ce2fa55e 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/device.h> 10#include <linux/device.h>
11#include <linux/etherdevice.h> 11#include <linux/etherdevice.h>
12#include <linux/export.h>
12#include <linux/platform_device.h> 13#include <linux/platform_device.h>
13#include <linux/mtd/mtd.h> 14#include <linux/mtd/mtd.h>
14#include <linux/mtd/partitions.h> 15#include <linux/mtd/partitions.h>
@@ -306,8 +307,13 @@ static struct resource bfin_uart0_resources[] = {
306 .flags = IORESOURCE_MEM, 307 .flags = IORESOURCE_MEM,
307 }, 308 },
308 { 309 {
310 .start = IRQ_UART0_TX,
311 .end = IRQ_UART0_TX,
312 .flags = IORESOURCE_IRQ,
313 },
314 {
309 .start = IRQ_UART0_RX, 315 .start = IRQ_UART0_RX,
310 .end = IRQ_UART0_RX+1, 316 .end = IRQ_UART0_RX,
311 .flags = IORESOURCE_IRQ, 317 .flags = IORESOURCE_IRQ,
312 }, 318 },
313 { 319 {
@@ -349,8 +355,13 @@ static struct resource bfin_uart1_resources[] = {
349 .flags = IORESOURCE_MEM, 355 .flags = IORESOURCE_MEM,
350 }, 356 },
351 { 357 {
358 .start = IRQ_UART1_TX,
359 .end = IRQ_UART1_TX,
360 .flags = IORESOURCE_IRQ,
361 },
362 {
352 .start = IRQ_UART1_RX, 363 .start = IRQ_UART1_RX,
353 .end = IRQ_UART1_RX+1, 364 .end = IRQ_UART1_RX,
354 .flags = IORESOURCE_IRQ, 365 .flags = IORESOURCE_IRQ,
355 }, 366 },
356 { 367 {
@@ -536,7 +547,6 @@ static struct platform_device bfin_sport1_uart_device = {
536 547
537#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 548#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
538#include <linux/bfin_mac.h> 549#include <linux/bfin_mac.h>
539#include <linux/export.h>
540static const unsigned short bfin_mac_peripherals[] = P_MII0; 550static const unsigned short bfin_mac_peripherals[] = P_MII0;
541 551
542static struct bfin_phydev_platform_data bfin_phydev_data[] = { 552static struct bfin_phydev_platform_data bfin_phydev_data[] = {
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index 629f3c333415..8356eb599f19 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -49,8 +49,13 @@ static struct resource bfin_uart0_resources[] = {
49 .flags = IORESOURCE_MEM, 49 .flags = IORESOURCE_MEM,
50 }, 50 },
51 { 51 {
52 .start = IRQ_UART0_TX,
53 .end = IRQ_UART0_TX,
54 .flags = IORESOURCE_IRQ,
55 },
56 {
52 .start = IRQ_UART0_RX, 57 .start = IRQ_UART0_RX,
53 .end = IRQ_UART0_RX+1, 58 .end = IRQ_UART0_RX,
54 .flags = IORESOURCE_IRQ, 59 .flags = IORESOURCE_IRQ,
55 }, 60 },
56 { 61 {
@@ -104,8 +109,13 @@ static struct resource bfin_uart1_resources[] = {
104 .flags = IORESOURCE_MEM, 109 .flags = IORESOURCE_MEM,
105 }, 110 },
106 { 111 {
112 .start = IRQ_UART1_TX,
113 .end = IRQ_UART1_TX,
114 .flags = IORESOURCE_IRQ,
115 },
116 {
107 .start = IRQ_UART1_RX, 117 .start = IRQ_UART1_RX,
108 .end = IRQ_UART1_RX+1, 118 .end = IRQ_UART1_RX,
109 .flags = IORESOURCE_IRQ, 119 .flags = IORESOURCE_IRQ,
110 }, 120 },
111 { 121 {
@@ -147,8 +157,13 @@ static struct resource bfin_uart2_resources[] = {
147 .flags = IORESOURCE_MEM, 157 .flags = IORESOURCE_MEM,
148 }, 158 },
149 { 159 {
160 .start = IRQ_UART2_TX,
161 .end = IRQ_UART2_TX,
162 .flags = IORESOURCE_IRQ,
163 },
164 {
150 .start = IRQ_UART2_RX, 165 .start = IRQ_UART2_RX,
151 .end = IRQ_UART2_RX+1, 166 .end = IRQ_UART2_RX,
152 .flags = IORESOURCE_IRQ, 167 .flags = IORESOURCE_IRQ,
153 }, 168 },
154 { 169 {
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index 212b9e0a08c8..0350eacec21b 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -135,8 +135,13 @@ static struct resource bfin_uart0_resources[] = {
135 .flags = IORESOURCE_MEM, 135 .flags = IORESOURCE_MEM,
136 }, 136 },
137 { 137 {
138 .start = IRQ_UART0_TX,
139 .end = IRQ_UART0_TX,
140 .flags = IORESOURCE_IRQ,
141 },
142 {
138 .start = IRQ_UART0_RX, 143 .start = IRQ_UART0_RX,
139 .end = IRQ_UART0_RX+1, 144 .end = IRQ_UART0_RX,
140 .flags = IORESOURCE_IRQ, 145 .flags = IORESOURCE_IRQ,
141 }, 146 },
142 { 147 {
@@ -178,8 +183,13 @@ static struct resource bfin_uart1_resources[] = {
178 .flags = IORESOURCE_MEM, 183 .flags = IORESOURCE_MEM,
179 }, 184 },
180 { 185 {
186 .start = IRQ_UART1_TX,
187 .end = IRQ_UART1_TX,
188 .flags = IORESOURCE_IRQ,
189 },
190 {
181 .start = IRQ_UART1_RX, 191 .start = IRQ_UART1_RX,
182 .end = IRQ_UART1_RX+1, 192 .end = IRQ_UART1_RX,
183 .flags = IORESOURCE_IRQ, 193 .flags = IORESOURCE_IRQ,
184 }, 194 },
185 { 195 {
@@ -237,8 +247,13 @@ static struct resource bfin_uart2_resources[] = {
237 .flags = IORESOURCE_MEM, 247 .flags = IORESOURCE_MEM,
238 }, 248 },
239 { 249 {
250 .start = IRQ_UART2_TX,
251 .end = IRQ_UART2_TX,
252 .flags = IORESOURCE_IRQ,
253 },
254 {
240 .start = IRQ_UART2_RX, 255 .start = IRQ_UART2_RX,
241 .end = IRQ_UART2_RX+1, 256 .end = IRQ_UART2_RX,
242 .flags = IORESOURCE_IRQ, 257 .flags = IORESOURCE_IRQ,
243 }, 258 },
244 { 259 {
@@ -280,8 +295,13 @@ static struct resource bfin_uart3_resources[] = {
280 .flags = IORESOURCE_MEM, 295 .flags = IORESOURCE_MEM,
281 }, 296 },
282 { 297 {
298 .start = IRQ_UART3_TX,
299 .end = IRQ_UART3_TX,
300 .flags = IORESOURCE_IRQ,
301 },
302 {
283 .start = IRQ_UART3_RX, 303 .start = IRQ_UART3_RX,
284 .end = IRQ_UART3_RX+1, 304 .end = IRQ_UART3_RX,
285 .flags = IORESOURCE_IRQ, 305 .flags = IORESOURCE_IRQ,
286 }, 306 },
287 { 307 {
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index cd9cbb68de69..bb868ac0fe2d 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -241,8 +241,13 @@ static struct resource bfin_uart0_resources[] = {
241 .flags = IORESOURCE_MEM, 241 .flags = IORESOURCE_MEM,
242 }, 242 },
243 { 243 {
244 .start = IRQ_UART0_TX,
245 .end = IRQ_UART0_TX,
246 .flags = IORESOURCE_IRQ,
247 },
248 {
244 .start = IRQ_UART0_RX, 249 .start = IRQ_UART0_RX,
245 .end = IRQ_UART0_RX+1, 250 .end = IRQ_UART0_RX,
246 .flags = IORESOURCE_IRQ, 251 .flags = IORESOURCE_IRQ,
247 }, 252 },
248 { 253 {
@@ -284,8 +289,13 @@ static struct resource bfin_uart1_resources[] = {
284 .flags = IORESOURCE_MEM, 289 .flags = IORESOURCE_MEM,
285 }, 290 },
286 { 291 {
292 .start = IRQ_UART1_TX,
293 .end = IRQ_UART1_TX,
294 .flags = IORESOURCE_IRQ,
295 },
296 {
287 .start = IRQ_UART1_RX, 297 .start = IRQ_UART1_RX,
288 .end = IRQ_UART1_RX+1, 298 .end = IRQ_UART1_RX,
289 .flags = IORESOURCE_IRQ, 299 .flags = IORESOURCE_IRQ,
290 }, 300 },
291 { 301 {
@@ -343,8 +353,13 @@ static struct resource bfin_uart2_resources[] = {
343 .flags = IORESOURCE_MEM, 353 .flags = IORESOURCE_MEM,
344 }, 354 },
345 { 355 {
356 .start = IRQ_UART2_TX,
357 .end = IRQ_UART2_TX,
358 .flags = IORESOURCE_IRQ,
359 },
360 {
346 .start = IRQ_UART2_RX, 361 .start = IRQ_UART2_RX,
347 .end = IRQ_UART2_RX+1, 362 .end = IRQ_UART2_RX,
348 .flags = IORESOURCE_IRQ, 363 .flags = IORESOURCE_IRQ,
349 }, 364 },
350 { 365 {
@@ -386,8 +401,13 @@ static struct resource bfin_uart3_resources[] = {
386 .flags = IORESOURCE_MEM, 401 .flags = IORESOURCE_MEM,
387 }, 402 },
388 { 403 {
404 .start = IRQ_UART3_TX,
405 .end = IRQ_UART3_TX,
406 .flags = IORESOURCE_IRQ,
407 },
408 {
389 .start = IRQ_UART3_RX, 409 .start = IRQ_UART3_RX,
390 .end = IRQ_UART3_RX+1, 410 .end = IRQ_UART3_RX,
391 .flags = IORESOURCE_IRQ, 411 .flags = IORESOURCE_IRQ,
392 }, 412 },
393 { 413 {
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 972e1347c6bc..b1b7339b6ba7 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -203,8 +203,13 @@ static struct resource bfin_uart0_resources[] = {
203 .flags = IORESOURCE_MEM, 203 .flags = IORESOURCE_MEM,
204 }, 204 },
205 { 205 {
206 .start = IRQ_UART_TX,
207 .end = IRQ_UART_TX,
208 .flags = IORESOURCE_IRQ,
209 },
210 {
206 .start = IRQ_UART_RX, 211 .start = IRQ_UART_RX,
207 .end = IRQ_UART_RX + 1, 212 .end = IRQ_UART_RX,
208 .flags = IORESOURCE_IRQ, 213 .flags = IORESOURCE_IRQ,
209 }, 214 },
210 { 215 {
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index c1b72f2d6354..c017cf07ed4e 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -277,8 +277,13 @@ static struct resource bfin_uart0_resources[] = {
277 .flags = IORESOURCE_MEM, 277 .flags = IORESOURCE_MEM,
278 }, 278 },
279 { 279 {
280 .start = IRQ_UART_TX,
281 .end = IRQ_UART_TX,
282 .flags = IORESOURCE_IRQ,
283 },
284 {
280 .start = IRQ_UART_RX, 285 .start = IRQ_UART_RX,
281 .end = IRQ_UART_RX+1, 286 .end = IRQ_UART_RX,
282 .flags = IORESOURCE_IRQ, 287 .flags = IORESOURCE_IRQ,
283 }, 288 },
284 { 289 {
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 9490dc800ca5..27f22ed381d9 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -172,8 +172,13 @@ static struct resource bfin_uart0_resources[] = {
172 .flags = IORESOURCE_MEM, 172 .flags = IORESOURCE_MEM,
173 }, 173 },
174 { 174 {
175 .start = IRQ_UART_TX,
176 .end = IRQ_UART_TX,
177 .flags = IORESOURCE_IRQ,
178 },
179 {
175 .start = IRQ_UART_RX, 180 .start = IRQ_UART_RX,
176 .end = IRQ_UART_RX+1, 181 .end = IRQ_UART_RX,
177 .flags = IORESOURCE_IRQ, 182 .flags = IORESOURCE_IRQ,
178 }, 183 },
179 { 184 {
diff --git a/arch/blackfin/mach-bf561/boards/tepla.c b/arch/blackfin/mach-bf561/boards/tepla.c
index bb056e60f6ed..1a57bc986aad 100644
--- a/arch/blackfin/mach-bf561/boards/tepla.c
+++ b/arch/blackfin/mach-bf561/boards/tepla.c
@@ -51,8 +51,13 @@ static struct resource bfin_uart0_resources[] = {
51 .flags = IORESOURCE_MEM, 51 .flags = IORESOURCE_MEM,
52 }, 52 },
53 { 53 {
54 .start = IRQ_UART_TX,
55 .end = IRQ_UART_TX,
56 .flags = IORESOURCE_IRQ,
57 },
58 {
54 .start = IRQ_UART_RX, 59 .start = IRQ_UART_RX,
55 .end = IRQ_UART_RX+1, 60 .end = IRQ_UART_RX,
56 .flags = IORESOURCE_IRQ, 61 .flags = IORESOURCE_IRQ,
57 }, 62 },
58 { 63 {
diff --git a/arch/cris/arch-v32/drivers/mach-a3/nandflash.c b/arch/cris/arch-v32/drivers/mach-a3/nandflash.c
index f58f2c1c5295..7fb52128ddc9 100644
--- a/arch/cris/arch-v32/drivers/mach-a3/nandflash.c
+++ b/arch/cris/arch-v32/drivers/mach-a3/nandflash.c
@@ -163,7 +163,7 @@ struct mtd_info *__init crisv32_nand_flash_probe(void)
163 this->ecc.mode = NAND_ECC_SOFT; 163 this->ecc.mode = NAND_ECC_SOFT;
164 164
165 /* Enable the following for a flash based bad block table */ 165 /* Enable the following for a flash based bad block table */
166 /* this->options = NAND_USE_FLASH_BBT; */ 166 /* this->bbt_options = NAND_BBT_USE_FLASH; */
167 167
168 /* Scan to find existence of the device */ 168 /* Scan to find existence of the device */
169 if (nand_scan(crisv32_mtd, 1)) { 169 if (nand_scan(crisv32_mtd, 1)) {
diff --git a/arch/cris/arch-v32/drivers/mach-fs/nandflash.c b/arch/cris/arch-v32/drivers/mach-fs/nandflash.c
index d5b0cc9f976b..e03238454b0e 100644
--- a/arch/cris/arch-v32/drivers/mach-fs/nandflash.c
+++ b/arch/cris/arch-v32/drivers/mach-fs/nandflash.c
@@ -154,7 +154,7 @@ struct mtd_info *__init crisv32_nand_flash_probe(void)
154 this->ecc.mode = NAND_ECC_SOFT; 154 this->ecc.mode = NAND_ECC_SOFT;
155 155
156 /* Enable the following for a flash based bad block table */ 156 /* Enable the following for a flash based bad block table */
157 /* this->options = NAND_USE_FLASH_BBT; */ 157 /* this->bbt_options = NAND_BBT_USE_FLASH; */
158 158
159 /* Scan to find existence of the device */ 159 /* Scan to find existence of the device */
160 if (nand_scan(crisv32_mtd, 1)) { 160 if (nand_scan(crisv32_mtd, 1)) {
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 6c28582fb98f..361d54019bb0 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -4,8 +4,8 @@ config M68K
4 select HAVE_IDE 4 select HAVE_IDE
5 select HAVE_AOUT if MMU 5 select HAVE_AOUT if MMU
6 select GENERIC_ATOMIC64 if MMU 6 select GENERIC_ATOMIC64 if MMU
7 select HAVE_GENERIC_HARDIRQS if !MMU 7 select HAVE_GENERIC_HARDIRQS
8 select GENERIC_IRQ_SHOW if !MMU 8 select GENERIC_IRQ_SHOW
9 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS 9 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
10 10
11config RWSEM_GENERIC_SPINLOCK 11config RWSEM_GENERIC_SPINLOCK
diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus
index 8294f0c1785e..3adb499584fb 100644
--- a/arch/m68k/Kconfig.bus
+++ b/arch/m68k/Kconfig.bus
@@ -2,6 +2,15 @@ if MMU
2 2
3comment "Bus Support" 3comment "Bus Support"
4 4
5config DIO
6 bool "DIO bus support"
7 depends on HP300
8 default y
9 help
10 Say Y here to enable support for the "DIO" expansion bus used in
11 HP300 machines. If you are using such a system you almost certainly
12 want this.
13
5config NUBUS 14config NUBUS
6 bool 15 bool
7 depends on MAC 16 depends on MAC
diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices
index d214034be6a6..6033f5d4e67e 100644
--- a/arch/m68k/Kconfig.devices
+++ b/arch/m68k/Kconfig.devices
@@ -24,6 +24,37 @@ config PROC_HARDWARE
24 including the model, CPU, MMU, clock speed, BogoMIPS rating, 24 including the model, CPU, MMU, clock speed, BogoMIPS rating,
25 and memory size. 25 and memory size.
26 26
27config NATFEAT
28 bool "ARAnyM emulator support"
29 depends on ATARI
30 help
31 This option enables support for ARAnyM native features, such as
32 access to a disk image as /dev/hda.
33
34config NFBLOCK
35 tristate "NatFeat block device support"
36 depends on BLOCK && NATFEAT
37 help
38 Say Y to include support for the ARAnyM NatFeat block device
39 which allows direct access to the hard drives without using
40 the hardware emulation.
41
42config NFCON
43 tristate "NatFeat console driver"
44 depends on NATFEAT
45 help
46 Say Y to include support for the ARAnyM NatFeat console driver
47 which allows the console output to be redirected to the stderr
48 output of ARAnyM.
49
50config NFETH
51 tristate "NatFeat Ethernet support"
52 depends on ETHERNET && NATFEAT
53 help
54 Say Y to include support for the ARAnyM NatFeat network device
55 which will emulate a regular ethernet device while presenting an
56 ethertap device to the host system.
57
27endmenu 58endmenu
28 59
29menu "Character devices" 60menu "Character devices"
diff --git a/arch/m68k/amiga/amiints.c b/arch/m68k/amiga/amiints.c
index c5b5212cc3f9..47b5f90002ab 100644
--- a/arch/m68k/amiga/amiints.c
+++ b/arch/m68k/amiga/amiints.c
@@ -1,43 +1,15 @@
1/* 1/*
2 * linux/arch/m68k/amiga/amiints.c -- Amiga Linux interrupt handling code 2 * Amiga Linux interrupt handling code
3 * 3 *
4 * This file is subject to the terms and conditions of the GNU General Public 4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file COPYING in the main directory of this archive 5 * License. See the file COPYING in the main directory of this archive
6 * for more details. 6 * for more details.
7 *
8 * 11/07/96: rewritten interrupt handling, irq lists are exists now only for
9 * this sources where it makes sense (VERTB/PORTS/EXTER) and you must
10 * be careful that dev_id for this sources is unique since this the
11 * only possibility to distinguish between different handlers for
12 * free_irq. irq lists also have different irq flags:
13 * - IRQ_FLG_FAST: handler is inserted at top of list (after other
14 * fast handlers)
15 * - IRQ_FLG_SLOW: handler is inserted at bottom of list and before
16 * they're executed irq level is set to the previous
17 * one, but handlers don't need to be reentrant, if
18 * reentrance occurred, slow handlers will be just
19 * called again.
20 * The whole interrupt handling for CIAs is moved to cia.c
21 * /Roman Zippel
22 *
23 * 07/08/99: rewamp of the interrupt handling - we now have two types of
24 * interrupts, normal and fast handlers, fast handlers being
25 * marked with IRQF_DISABLED and runs with all other interrupts
26 * disabled. Normal interrupts disable their own source but
27 * run with all other interrupt sources enabled.
28 * PORTS and EXTER interrupts are always shared even if the
29 * drivers do not explicitly mark this when calling
30 * request_irq which they really should do.
31 * This is similar to the way interrupts are handled on all
32 * other architectures and makes a ton of sense besides
33 * having the advantage of making it easier to share
34 * drivers.
35 * /Jes
36 */ 7 */
37 8
38#include <linux/init.h> 9#include <linux/init.h>
39#include <linux/interrupt.h> 10#include <linux/interrupt.h>
40#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/irq.h>
41 13
42#include <asm/irq.h> 14#include <asm/irq.h>
43#include <asm/traps.h> 15#include <asm/traps.h>
@@ -45,56 +17,6 @@
45#include <asm/amigaints.h> 17#include <asm/amigaints.h>
46#include <asm/amipcmcia.h> 18#include <asm/amipcmcia.h>
47 19
48static void amiga_enable_irq(unsigned int irq);
49static void amiga_disable_irq(unsigned int irq);
50static irqreturn_t ami_int1(int irq, void *dev_id);
51static irqreturn_t ami_int3(int irq, void *dev_id);
52static irqreturn_t ami_int4(int irq, void *dev_id);
53static irqreturn_t ami_int5(int irq, void *dev_id);
54
55static struct irq_controller amiga_irq_controller = {
56 .name = "amiga",
57 .lock = __SPIN_LOCK_UNLOCKED(amiga_irq_controller.lock),
58 .enable = amiga_enable_irq,
59 .disable = amiga_disable_irq,
60};
61
62/*
63 * void amiga_init_IRQ(void)
64 *
65 * Parameters: None
66 *
67 * Returns: Nothing
68 *
69 * This function should be called during kernel startup to initialize
70 * the amiga IRQ handling routines.
71 */
72
73void __init amiga_init_IRQ(void)
74{
75 if (request_irq(IRQ_AUTO_1, ami_int1, 0, "int1", NULL))
76 pr_err("Couldn't register int%d\n", 1);
77 if (request_irq(IRQ_AUTO_3, ami_int3, 0, "int3", NULL))
78 pr_err("Couldn't register int%d\n", 3);
79 if (request_irq(IRQ_AUTO_4, ami_int4, 0, "int4", NULL))
80 pr_err("Couldn't register int%d\n", 4);
81 if (request_irq(IRQ_AUTO_5, ami_int5, 0, "int5", NULL))
82 pr_err("Couldn't register int%d\n", 5);
83
84 m68k_setup_irq_controller(&amiga_irq_controller, IRQ_USER, AMI_STD_IRQS);
85
86 /* turn off PCMCIA interrupts */
87 if (AMIGAHW_PRESENT(PCMCIA))
88 gayle.inten = GAYLE_IRQ_IDE;
89
90 /* turn off all interrupts and enable the master interrupt bit */
91 amiga_custom.intena = 0x7fff;
92 amiga_custom.intreq = 0x7fff;
93 amiga_custom.intena = IF_SETCLR | IF_INTEN;
94
95 cia_init_IRQ(&ciaa_base);
96 cia_init_IRQ(&ciab_base);
97}
98 20
99/* 21/*
100 * Enable/disable a particular machine specific interrupt source. 22 * Enable/disable a particular machine specific interrupt source.
@@ -103,112 +25,150 @@ void __init amiga_init_IRQ(void)
103 * internal data, that may not be changed by the interrupt at the same time. 25 * internal data, that may not be changed by the interrupt at the same time.
104 */ 26 */
105 27
106static void amiga_enable_irq(unsigned int irq) 28static void amiga_irq_enable(struct irq_data *data)
107{ 29{
108 amiga_custom.intena = IF_SETCLR | (1 << (irq - IRQ_USER)); 30 amiga_custom.intena = IF_SETCLR | (1 << (data->irq - IRQ_USER));
109} 31}
110 32
111static void amiga_disable_irq(unsigned int irq) 33static void amiga_irq_disable(struct irq_data *data)
112{ 34{
113 amiga_custom.intena = 1 << (irq - IRQ_USER); 35 amiga_custom.intena = 1 << (data->irq - IRQ_USER);
114} 36}
115 37
38static struct irq_chip amiga_irq_chip = {
39 .name = "amiga",
40 .irq_enable = amiga_irq_enable,
41 .irq_disable = amiga_irq_disable,
42};
43
44
116/* 45/*
117 * The builtin Amiga hardware interrupt handlers. 46 * The builtin Amiga hardware interrupt handlers.
118 */ 47 */
119 48
120static irqreturn_t ami_int1(int irq, void *dev_id) 49static void ami_int1(unsigned int irq, struct irq_desc *desc)
121{ 50{
122 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; 51 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
123 52
124 /* if serial transmit buffer empty, interrupt */ 53 /* if serial transmit buffer empty, interrupt */
125 if (ints & IF_TBE) { 54 if (ints & IF_TBE) {
126 amiga_custom.intreq = IF_TBE; 55 amiga_custom.intreq = IF_TBE;
127 m68k_handle_int(IRQ_AMIGA_TBE); 56 generic_handle_irq(IRQ_AMIGA_TBE);
128 } 57 }
129 58
130 /* if floppy disk transfer complete, interrupt */ 59 /* if floppy disk transfer complete, interrupt */
131 if (ints & IF_DSKBLK) { 60 if (ints & IF_DSKBLK) {
132 amiga_custom.intreq = IF_DSKBLK; 61 amiga_custom.intreq = IF_DSKBLK;
133 m68k_handle_int(IRQ_AMIGA_DSKBLK); 62 generic_handle_irq(IRQ_AMIGA_DSKBLK);
134 } 63 }
135 64
136 /* if software interrupt set, interrupt */ 65 /* if software interrupt set, interrupt */
137 if (ints & IF_SOFT) { 66 if (ints & IF_SOFT) {
138 amiga_custom.intreq = IF_SOFT; 67 amiga_custom.intreq = IF_SOFT;
139 m68k_handle_int(IRQ_AMIGA_SOFT); 68 generic_handle_irq(IRQ_AMIGA_SOFT);
140 } 69 }
141 return IRQ_HANDLED;
142} 70}
143 71
144static irqreturn_t ami_int3(int irq, void *dev_id) 72static void ami_int3(unsigned int irq, struct irq_desc *desc)
145{ 73{
146 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; 74 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
147 75
148 /* if a blitter interrupt */ 76 /* if a blitter interrupt */
149 if (ints & IF_BLIT) { 77 if (ints & IF_BLIT) {
150 amiga_custom.intreq = IF_BLIT; 78 amiga_custom.intreq = IF_BLIT;
151 m68k_handle_int(IRQ_AMIGA_BLIT); 79 generic_handle_irq(IRQ_AMIGA_BLIT);
152 } 80 }
153 81
154 /* if a copper interrupt */ 82 /* if a copper interrupt */
155 if (ints & IF_COPER) { 83 if (ints & IF_COPER) {
156 amiga_custom.intreq = IF_COPER; 84 amiga_custom.intreq = IF_COPER;
157 m68k_handle_int(IRQ_AMIGA_COPPER); 85 generic_handle_irq(IRQ_AMIGA_COPPER);
158 } 86 }
159 87
160 /* if a vertical blank interrupt */ 88 /* if a vertical blank interrupt */
161 if (ints & IF_VERTB) { 89 if (ints & IF_VERTB) {
162 amiga_custom.intreq = IF_VERTB; 90 amiga_custom.intreq = IF_VERTB;
163 m68k_handle_int(IRQ_AMIGA_VERTB); 91 generic_handle_irq(IRQ_AMIGA_VERTB);
164 } 92 }
165 return IRQ_HANDLED;
166} 93}
167 94
168static irqreturn_t ami_int4(int irq, void *dev_id) 95static void ami_int4(unsigned int irq, struct irq_desc *desc)
169{ 96{
170 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; 97 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
171 98
172 /* if audio 0 interrupt */ 99 /* if audio 0 interrupt */
173 if (ints & IF_AUD0) { 100 if (ints & IF_AUD0) {
174 amiga_custom.intreq = IF_AUD0; 101 amiga_custom.intreq = IF_AUD0;
175 m68k_handle_int(IRQ_AMIGA_AUD0); 102 generic_handle_irq(IRQ_AMIGA_AUD0);
176 } 103 }
177 104
178 /* if audio 1 interrupt */ 105 /* if audio 1 interrupt */
179 if (ints & IF_AUD1) { 106 if (ints & IF_AUD1) {
180 amiga_custom.intreq = IF_AUD1; 107 amiga_custom.intreq = IF_AUD1;
181 m68k_handle_int(IRQ_AMIGA_AUD1); 108 generic_handle_irq(IRQ_AMIGA_AUD1);
182 } 109 }
183 110
184 /* if audio 2 interrupt */ 111 /* if audio 2 interrupt */
185 if (ints & IF_AUD2) { 112 if (ints & IF_AUD2) {
186 amiga_custom.intreq = IF_AUD2; 113 amiga_custom.intreq = IF_AUD2;
187 m68k_handle_int(IRQ_AMIGA_AUD2); 114 generic_handle_irq(IRQ_AMIGA_AUD2);
188 } 115 }
189 116
190 /* if audio 3 interrupt */ 117 /* if audio 3 interrupt */
191 if (ints & IF_AUD3) { 118 if (ints & IF_AUD3) {
192 amiga_custom.intreq = IF_AUD3; 119 amiga_custom.intreq = IF_AUD3;
193 m68k_handle_int(IRQ_AMIGA_AUD3); 120 generic_handle_irq(IRQ_AMIGA_AUD3);
194 } 121 }
195 return IRQ_HANDLED;
196} 122}
197 123
198static irqreturn_t ami_int5(int irq, void *dev_id) 124static void ami_int5(unsigned int irq, struct irq_desc *desc)
199{ 125{
200 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; 126 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
201 127
202 /* if serial receive buffer full interrupt */ 128 /* if serial receive buffer full interrupt */
203 if (ints & IF_RBF) { 129 if (ints & IF_RBF) {
204 /* acknowledge of IF_RBF must be done by the serial interrupt */ 130 /* acknowledge of IF_RBF must be done by the serial interrupt */
205 m68k_handle_int(IRQ_AMIGA_RBF); 131 generic_handle_irq(IRQ_AMIGA_RBF);
206 } 132 }
207 133
208 /* if a disk sync interrupt */ 134 /* if a disk sync interrupt */
209 if (ints & IF_DSKSYN) { 135 if (ints & IF_DSKSYN) {
210 amiga_custom.intreq = IF_DSKSYN; 136 amiga_custom.intreq = IF_DSKSYN;
211 m68k_handle_int(IRQ_AMIGA_DSKSYN); 137 generic_handle_irq(IRQ_AMIGA_DSKSYN);
212 } 138 }
213 return IRQ_HANDLED; 139}
140
141
142/*
143 * void amiga_init_IRQ(void)
144 *
145 * Parameters: None
146 *
147 * Returns: Nothing
148 *
149 * This function should be called during kernel startup to initialize
150 * the amiga IRQ handling routines.
151 */
152
153void __init amiga_init_IRQ(void)
154{
155 m68k_setup_irq_controller(&amiga_irq_chip, handle_simple_irq, IRQ_USER,
156 AMI_STD_IRQS);
157
158 irq_set_chained_handler(IRQ_AUTO_1, ami_int1);
159 irq_set_chained_handler(IRQ_AUTO_3, ami_int3);
160 irq_set_chained_handler(IRQ_AUTO_4, ami_int4);
161 irq_set_chained_handler(IRQ_AUTO_5, ami_int5);
162
163 /* turn off PCMCIA interrupts */
164 if (AMIGAHW_PRESENT(PCMCIA))
165 gayle.inten = GAYLE_IRQ_IDE;
166
167 /* turn off all interrupts and enable the master interrupt bit */
168 amiga_custom.intena = 0x7fff;
169 amiga_custom.intreq = 0x7fff;
170 amiga_custom.intena = IF_SETCLR | IF_INTEN;
171
172 cia_init_IRQ(&ciaa_base);
173 cia_init_IRQ(&ciab_base);
214} 174}
diff --git a/arch/m68k/amiga/cia.c b/arch/m68k/amiga/cia.c
index ecd0f7ca6f0e..18c0e29976e3 100644
--- a/arch/m68k/amiga/cia.c
+++ b/arch/m68k/amiga/cia.c
@@ -93,13 +93,14 @@ static irqreturn_t cia_handler(int irq, void *dev_id)
93 amiga_custom.intreq = base->int_mask; 93 amiga_custom.intreq = base->int_mask;
94 for (; ints; mach_irq++, ints >>= 1) { 94 for (; ints; mach_irq++, ints >>= 1) {
95 if (ints & 1) 95 if (ints & 1)
96 m68k_handle_int(mach_irq); 96 generic_handle_irq(mach_irq);
97 } 97 }
98 return IRQ_HANDLED; 98 return IRQ_HANDLED;
99} 99}
100 100
101static void cia_enable_irq(unsigned int irq) 101static void cia_irq_enable(struct irq_data *data)
102{ 102{
103 unsigned int irq = data->irq;
103 unsigned char mask; 104 unsigned char mask;
104 105
105 if (irq >= IRQ_AMIGA_CIAB) { 106 if (irq >= IRQ_AMIGA_CIAB) {
@@ -113,19 +114,20 @@ static void cia_enable_irq(unsigned int irq)
113 } 114 }
114} 115}
115 116
116static void cia_disable_irq(unsigned int irq) 117static void cia_irq_disable(struct irq_data *data)
117{ 118{
119 unsigned int irq = data->irq;
120
118 if (irq >= IRQ_AMIGA_CIAB) 121 if (irq >= IRQ_AMIGA_CIAB)
119 cia_able_irq(&ciab_base, 1 << (irq - IRQ_AMIGA_CIAB)); 122 cia_able_irq(&ciab_base, 1 << (irq - IRQ_AMIGA_CIAB));
120 else 123 else
121 cia_able_irq(&ciaa_base, 1 << (irq - IRQ_AMIGA_CIAA)); 124 cia_able_irq(&ciaa_base, 1 << (irq - IRQ_AMIGA_CIAA));
122} 125}
123 126
124static struct irq_controller cia_irq_controller = { 127static struct irq_chip cia_irq_chip = {
125 .name = "cia", 128 .name = "cia",
126 .lock = __SPIN_LOCK_UNLOCKED(cia_irq_controller.lock), 129 .irq_enable = cia_irq_enable,
127 .enable = cia_enable_irq, 130 .irq_disable = cia_irq_disable,
128 .disable = cia_disable_irq,
129}; 131};
130 132
131/* 133/*
@@ -134,9 +136,9 @@ static struct irq_controller cia_irq_controller = {
134 * into this chain. 136 * into this chain.
135 */ 137 */
136 138
137static void auto_enable_irq(unsigned int irq) 139static void auto_irq_enable(struct irq_data *data)
138{ 140{
139 switch (irq) { 141 switch (data->irq) {
140 case IRQ_AUTO_2: 142 case IRQ_AUTO_2:
141 amiga_custom.intena = IF_SETCLR | IF_PORTS; 143 amiga_custom.intena = IF_SETCLR | IF_PORTS;
142 break; 144 break;
@@ -146,9 +148,9 @@ static void auto_enable_irq(unsigned int irq)
146 } 148 }
147} 149}
148 150
149static void auto_disable_irq(unsigned int irq) 151static void auto_irq_disable(struct irq_data *data)
150{ 152{
151 switch (irq) { 153 switch (data->irq) {
152 case IRQ_AUTO_2: 154 case IRQ_AUTO_2:
153 amiga_custom.intena = IF_PORTS; 155 amiga_custom.intena = IF_PORTS;
154 break; 156 break;
@@ -158,24 +160,25 @@ static void auto_disable_irq(unsigned int irq)
158 } 160 }
159} 161}
160 162
161static struct irq_controller auto_irq_controller = { 163static struct irq_chip auto_irq_chip = {
162 .name = "auto", 164 .name = "auto",
163 .lock = __SPIN_LOCK_UNLOCKED(auto_irq_controller.lock), 165 .irq_enable = auto_irq_enable,
164 .enable = auto_enable_irq, 166 .irq_disable = auto_irq_disable,
165 .disable = auto_disable_irq,
166}; 167};
167 168
168void __init cia_init_IRQ(struct ciabase *base) 169void __init cia_init_IRQ(struct ciabase *base)
169{ 170{
170 m68k_setup_irq_controller(&cia_irq_controller, base->cia_irq, CIA_IRQS); 171 m68k_setup_irq_controller(&cia_irq_chip, handle_simple_irq,
172 base->cia_irq, CIA_IRQS);
171 173
172 /* clear any pending interrupt and turn off all interrupts */ 174 /* clear any pending interrupt and turn off all interrupts */
173 cia_set_irq(base, CIA_ICR_ALL); 175 cia_set_irq(base, CIA_ICR_ALL);
174 cia_able_irq(base, CIA_ICR_ALL); 176 cia_able_irq(base, CIA_ICR_ALL);
175 177
176 /* override auto int and install CIA handler */ 178 /* override auto int and install CIA handler */
177 m68k_setup_irq_controller(&auto_irq_controller, base->handler_irq, 1); 179 m68k_setup_irq_controller(&auto_irq_chip, handle_simple_irq,
178 m68k_irq_startup(base->handler_irq); 180 base->handler_irq, 1);
181 m68k_irq_startup_irq(base->handler_irq);
179 if (request_irq(base->handler_irq, cia_handler, IRQF_SHARED, 182 if (request_irq(base->handler_irq, cia_handler, IRQF_SHARED,
180 base->name, base)) 183 base->name, base))
181 pr_err("Couldn't register %s interrupt\n", base->name); 184 pr_err("Couldn't register %s interrupt\n", base->name);
diff --git a/arch/m68k/apollo/dn_ints.c b/arch/m68k/apollo/dn_ints.c
index 5d47f3aa3810..17be1e7e2df2 100644
--- a/arch/m68k/apollo/dn_ints.c
+++ b/arch/m68k/apollo/dn_ints.c
@@ -1,19 +1,13 @@
1#include <linux/interrupt.h> 1#include <linux/interrupt.h>
2#include <linux/irq.h>
2 3
3#include <asm/irq.h>
4#include <asm/traps.h> 4#include <asm/traps.h>
5#include <asm/apollohw.h> 5#include <asm/apollohw.h>
6 6
7void dn_process_int(unsigned int irq, struct pt_regs *fp) 7unsigned int apollo_irq_startup(struct irq_data *data)
8{ 8{
9 __m68k_handle_int(irq, fp); 9 unsigned int irq = data->irq;
10 10
11 *(volatile unsigned char *)(pica)=0x20;
12 *(volatile unsigned char *)(picb)=0x20;
13}
14
15int apollo_irq_startup(unsigned int irq)
16{
17 if (irq < 8) 11 if (irq < 8)
18 *(volatile unsigned char *)(pica+1) &= ~(1 << irq); 12 *(volatile unsigned char *)(pica+1) &= ~(1 << irq);
19 else 13 else
@@ -21,24 +15,33 @@ int apollo_irq_startup(unsigned int irq)
21 return 0; 15 return 0;
22} 16}
23 17
24void apollo_irq_shutdown(unsigned int irq) 18void apollo_irq_shutdown(struct irq_data *data)
25{ 19{
20 unsigned int irq = data->irq;
21
26 if (irq < 8) 22 if (irq < 8)
27 *(volatile unsigned char *)(pica+1) |= (1 << irq); 23 *(volatile unsigned char *)(pica+1) |= (1 << irq);
28 else 24 else
29 *(volatile unsigned char *)(picb+1) |= (1 << (irq - 8)); 25 *(volatile unsigned char *)(picb+1) |= (1 << (irq - 8));
30} 26}
31 27
32static struct irq_controller apollo_irq_controller = { 28void apollo_irq_eoi(struct irq_data *data)
29{
30 *(volatile unsigned char *)(pica) = 0x20;
31 *(volatile unsigned char *)(picb) = 0x20;
32}
33
34static struct irq_chip apollo_irq_chip = {
33 .name = "apollo", 35 .name = "apollo",
34 .lock = __SPIN_LOCK_UNLOCKED(apollo_irq_controller.lock), 36 .irq_startup = apollo_irq_startup,
35 .startup = apollo_irq_startup, 37 .irq_shutdown = apollo_irq_shutdown,
36 .shutdown = apollo_irq_shutdown, 38 .irq_eoi = apollo_irq_eoi,
37}; 39};
38 40
39 41
40void __init dn_init_IRQ(void) 42void __init dn_init_IRQ(void)
41{ 43{
42 m68k_setup_user_interrupt(VEC_USER + 96, 16, dn_process_int); 44 m68k_setup_user_interrupt(VEC_USER + 96, 16);
43 m68k_setup_irq_controller(&apollo_irq_controller, IRQ_APOLLO, 16); 45 m68k_setup_irq_controller(&apollo_irq_chip, handle_fasteoi_irq,
46 IRQ_APOLLO, 16);
44} 47}
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 26a804e67bce..6d196dadfdbc 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -60,243 +60,7 @@
60 * <asm/atariints.h>): Autovector interrupts are 1..7, then follow ST-MFP, 60 * <asm/atariints.h>): Autovector interrupts are 1..7, then follow ST-MFP,
61 * TT-MFP, SCC, and finally VME interrupts. Vector numbers for the latter can 61 * TT-MFP, SCC, and finally VME interrupts. Vector numbers for the latter can
62 * be allocated by atari_register_vme_int(). 62 * be allocated by atari_register_vme_int().
63 *
64 * Each interrupt can be of three types:
65 *
66 * - SLOW: The handler runs with all interrupts enabled, except the one it
67 * was called by (to avoid reentering). This should be the usual method.
68 * But it is currently possible only for MFP ints, since only the MFP
69 * offers an easy way to mask interrupts.
70 *
71 * - FAST: The handler runs with all interrupts disabled. This should be used
72 * only for really fast handlers, that just do actions immediately
73 * necessary, and let the rest do a bottom half or task queue.
74 *
75 * - PRIORITIZED: The handler can be interrupted by higher-level ints
76 * (greater IPL, no MFP priorities!). This is the method of choice for ints
77 * which should be slow, but are not from a MFP.
78 *
79 * The feature of more than one handler for one int source is still there, but
80 * only applicable if all handers are of the same type. To not slow down
81 * processing of ints with only one handler by the chaining feature, the list
82 * calling function atari_call_irq_list() is only plugged in at the time the
83 * second handler is registered.
84 *
85 * Implementation notes: For fast-as-possible int handling, there are separate
86 * entry points for each type (slow/fast/prio). The assembler handler calls
87 * the irq directly in the usual case, no C wrapper is involved. In case of
88 * multiple handlers, atari_call_irq_list() is registered as handler and calls
89 * in turn the real irq's. To ease access from assembler level to the irq
90 * function pointer and accompanying data, these two are stored in a separate
91 * array, irq_handler[]. The rest of data (type, name) are put into a second
92 * array, irq_param, that is accessed from C only. For each slow interrupt (32
93 * in all) there are separate handler functions, which makes it possible to
94 * hard-code the MFP register address and value, are necessary to mask the
95 * int. If there'd be only one generic function, lots of calculations would be
96 * needed to determine MFP register and int mask from the vector number :-(
97 *
98 * Furthermore, slow ints may not lower the IPL below its previous value
99 * (before the int happened). This is needed so that an int of class PRIO, on
100 * that this int may be stacked, cannot be reentered. This feature is
101 * implemented as follows: If the stack frame format is 1 (throwaway), the int
102 * is not stacked, and the IPL is anded with 0xfbff, resulting in a new level
103 * 2, which still blocks the HSYNC, but no interrupts of interest. If the
104 * frame format is 0, the int is nested, and the old IPL value can be found in
105 * the sr copy in the frame.
106 */
107
108#if 0
109
110#define NUM_INT_SOURCES (8 + NUM_ATARI_SOURCES)
111
112typedef void (*asm_irq_handler)(void);
113
114struct irqhandler {
115 irqreturn_t (*handler)(int, void *, struct pt_regs *);
116 void *dev_id;
117};
118
119struct irqparam {
120 unsigned long flags;
121 const char *devname;
122};
123
124/*
125 * Array with irq's and their parameter data. This array is accessed from low
126 * level assembler code, so an element size of 8 allows usage of index scaling
127 * addressing mode.
128 */ 63 */
129static struct irqhandler irq_handler[NUM_INT_SOURCES];
130
131/*
132 * This array hold the rest of parameters of int handlers: type
133 * (slow,fast,prio) and the name of the handler. These values are only
134 * accessed from C
135 */
136static struct irqparam irq_param[NUM_INT_SOURCES];
137
138/* check for valid int number (complex, sigh...) */
139#define IS_VALID_INTNO(n) \
140 ((n) > 0 && \
141 /* autovec and ST-MFP ok anyway */ \
142 (((n) < TTMFP_SOURCE_BASE) || \
143 /* TT-MFP ok if present */ \
144 ((n) >= TTMFP_SOURCE_BASE && (n) < SCC_SOURCE_BASE && \
145 ATARIHW_PRESENT(TT_MFP)) || \
146 /* SCC ok if present and number even */ \
147 ((n) >= SCC_SOURCE_BASE && (n) < VME_SOURCE_BASE && \
148 !((n) & 1) && ATARIHW_PRESENT(SCC)) || \
149 /* greater numbers ok if they are registered VME vectors */ \
150 ((n) >= VME_SOURCE_BASE && (n) < VME_SOURCE_BASE + VME_MAX_SOURCES && \
151 free_vme_vec_bitmap & (1 << ((n) - VME_SOURCE_BASE)))))
152
153
154/*
155 * Here start the assembler entry points for interrupts
156 */
157
158#define IRQ_NAME(nr) atari_slow_irq_##nr##_handler(void)
159
160#define BUILD_SLOW_IRQ(n) \
161asmlinkage void IRQ_NAME(n); \
162/* Dummy function to allow asm with operands. */ \
163void atari_slow_irq_##n##_dummy (void) { \
164__asm__ (__ALIGN_STR "\n" \
165"atari_slow_irq_" #n "_handler:\t" \
166" addl %6,%5\n" /* preempt_count() += HARDIRQ_OFFSET */ \
167 SAVE_ALL_INT "\n" \
168 GET_CURRENT(%%d0) "\n" \
169" andb #~(1<<(%c3&7)),%a4:w\n" /* mask this interrupt */ \
170 /* get old IPL from stack frame */ \
171" bfextu %%sp@(%c2){#5,#3},%%d0\n" \
172" movew %%sr,%%d1\n" \
173" bfins %%d0,%%d1{#21,#3}\n" \
174" movew %%d1,%%sr\n" /* set IPL = previous value */ \
175" addql #1,%a0\n" \
176" lea %a1,%%a0\n" \
177" pea %%sp@\n" /* push addr of frame */ \
178" movel %%a0@(4),%%sp@-\n" /* push handler data */ \
179" pea (%c3+8)\n" /* push int number */ \
180" movel %%a0@,%%a0\n" \
181" jbsr %%a0@\n" /* call the handler */ \
182" addql #8,%%sp\n" \
183" addql #4,%%sp\n" \
184" orw #0x0600,%%sr\n" \
185" andw #0xfeff,%%sr\n" /* set IPL = 6 again */ \
186" orb #(1<<(%c3&7)),%a4:w\n" /* now unmask the int again */ \
187" jbra ret_from_interrupt\n" \
188 : : "i" (&kstat_cpu(0).irqs[n+8]), "i" (&irq_handler[n+8]), \
189 "n" (PT_OFF_SR), "n" (n), \
190 "i" (n & 8 ? (n & 16 ? &tt_mfp.int_mk_a : &st_mfp.int_mk_a) \
191 : (n & 16 ? &tt_mfp.int_mk_b : &st_mfp.int_mk_b)), \
192 "m" (preempt_count()), "di" (HARDIRQ_OFFSET) \
193); \
194 for (;;); /* fake noreturn */ \
195}
196
197BUILD_SLOW_IRQ(0);
198BUILD_SLOW_IRQ(1);
199BUILD_SLOW_IRQ(2);
200BUILD_SLOW_IRQ(3);
201BUILD_SLOW_IRQ(4);
202BUILD_SLOW_IRQ(5);
203BUILD_SLOW_IRQ(6);
204BUILD_SLOW_IRQ(7);
205BUILD_SLOW_IRQ(8);
206BUILD_SLOW_IRQ(9);
207BUILD_SLOW_IRQ(10);
208BUILD_SLOW_IRQ(11);
209BUILD_SLOW_IRQ(12);
210BUILD_SLOW_IRQ(13);
211BUILD_SLOW_IRQ(14);
212BUILD_SLOW_IRQ(15);
213BUILD_SLOW_IRQ(16);
214BUILD_SLOW_IRQ(17);
215BUILD_SLOW_IRQ(18);
216BUILD_SLOW_IRQ(19);
217BUILD_SLOW_IRQ(20);
218BUILD_SLOW_IRQ(21);
219BUILD_SLOW_IRQ(22);
220BUILD_SLOW_IRQ(23);
221BUILD_SLOW_IRQ(24);
222BUILD_SLOW_IRQ(25);
223BUILD_SLOW_IRQ(26);
224BUILD_SLOW_IRQ(27);
225BUILD_SLOW_IRQ(28);
226BUILD_SLOW_IRQ(29);
227BUILD_SLOW_IRQ(30);
228BUILD_SLOW_IRQ(31);
229
230asm_irq_handler slow_handlers[32] = {
231 [0] = atari_slow_irq_0_handler,
232 [1] = atari_slow_irq_1_handler,
233 [2] = atari_slow_irq_2_handler,
234 [3] = atari_slow_irq_3_handler,
235 [4] = atari_slow_irq_4_handler,
236 [5] = atari_slow_irq_5_handler,
237 [6] = atari_slow_irq_6_handler,
238 [7] = atari_slow_irq_7_handler,
239 [8] = atari_slow_irq_8_handler,
240 [9] = atari_slow_irq_9_handler,
241 [10] = atari_slow_irq_10_handler,
242 [11] = atari_slow_irq_11_handler,
243 [12] = atari_slow_irq_12_handler,
244 [13] = atari_slow_irq_13_handler,
245 [14] = atari_slow_irq_14_handler,
246 [15] = atari_slow_irq_15_handler,
247 [16] = atari_slow_irq_16_handler,
248 [17] = atari_slow_irq_17_handler,
249 [18] = atari_slow_irq_18_handler,
250 [19] = atari_slow_irq_19_handler,
251 [20] = atari_slow_irq_20_handler,
252 [21] = atari_slow_irq_21_handler,
253 [22] = atari_slow_irq_22_handler,
254 [23] = atari_slow_irq_23_handler,
255 [24] = atari_slow_irq_24_handler,
256 [25] = atari_slow_irq_25_handler,
257 [26] = atari_slow_irq_26_handler,
258 [27] = atari_slow_irq_27_handler,
259 [28] = atari_slow_irq_28_handler,
260 [29] = atari_slow_irq_29_handler,
261 [30] = atari_slow_irq_30_handler,
262 [31] = atari_slow_irq_31_handler
263};
264
265asmlinkage void atari_fast_irq_handler( void );
266asmlinkage void atari_prio_irq_handler( void );
267
268/* Dummy function to allow asm with operands. */
269void atari_fast_prio_irq_dummy (void) {
270__asm__ (__ALIGN_STR "\n"
271"atari_fast_irq_handler:\n\t"
272 "orw #0x700,%%sr\n" /* disable all interrupts */
273"atari_prio_irq_handler:\n\t"
274 "addl %3,%2\n\t" /* preempt_count() += HARDIRQ_OFFSET */
275 SAVE_ALL_INT "\n\t"
276 GET_CURRENT(%%d0) "\n\t"
277 /* get vector number from stack frame and convert to source */
278 "bfextu %%sp@(%c1){#4,#10},%%d0\n\t"
279 "subw #(0x40-8),%%d0\n\t"
280 "jpl 1f\n\t"
281 "addw #(0x40-8-0x18),%%d0\n"
282 "1:\tlea %a0,%%a0\n\t"
283 "addql #1,%%a0@(%%d0:l:4)\n\t"
284 "lea irq_handler,%%a0\n\t"
285 "lea %%a0@(%%d0:l:8),%%a0\n\t"
286 "pea %%sp@\n\t" /* push frame address */
287 "movel %%a0@(4),%%sp@-\n\t" /* push handler data */
288 "movel %%d0,%%sp@-\n\t" /* push int number */
289 "movel %%a0@,%%a0\n\t"
290 "jsr %%a0@\n\t" /* and call the handler */
291 "addql #8,%%sp\n\t"
292 "addql #4,%%sp\n\t"
293 "jbra ret_from_interrupt"
294 : : "i" (&kstat_cpu(0).irqs), "n" (PT_OFF_FORMATVEC),
295 "m" (preempt_count()), "di" (HARDIRQ_OFFSET)
296);
297 for (;;);
298}
299#endif
300 64
301/* 65/*
302 * Bitmap for free interrupt vector numbers 66 * Bitmap for free interrupt vector numbers
@@ -320,31 +84,44 @@ extern void atari_microwire_cmd(int cmd);
320 84
321extern int atari_SCC_reset_done; 85extern int atari_SCC_reset_done;
322 86
323static int atari_startup_irq(unsigned int irq) 87static unsigned int atari_irq_startup(struct irq_data *data)
324{ 88{
325 m68k_irq_startup(irq); 89 unsigned int irq = data->irq;
90
91 m68k_irq_startup(data);
326 atari_turnon_irq(irq); 92 atari_turnon_irq(irq);
327 atari_enable_irq(irq); 93 atari_enable_irq(irq);
328 return 0; 94 return 0;
329} 95}
330 96
331static void atari_shutdown_irq(unsigned int irq) 97static void atari_irq_shutdown(struct irq_data *data)
332{ 98{
99 unsigned int irq = data->irq;
100
333 atari_disable_irq(irq); 101 atari_disable_irq(irq);
334 atari_turnoff_irq(irq); 102 atari_turnoff_irq(irq);
335 m68k_irq_shutdown(irq); 103 m68k_irq_shutdown(data);
336 104
337 if (irq == IRQ_AUTO_4) 105 if (irq == IRQ_AUTO_4)
338 vectors[VEC_INT4] = falcon_hblhandler; 106 vectors[VEC_INT4] = falcon_hblhandler;
339} 107}
340 108
341static struct irq_controller atari_irq_controller = { 109static void atari_irq_enable(struct irq_data *data)
110{
111 atari_enable_irq(data->irq);
112}
113
114static void atari_irq_disable(struct irq_data *data)
115{
116 atari_disable_irq(data->irq);
117}
118
119static struct irq_chip atari_irq_chip = {
342 .name = "atari", 120 .name = "atari",
343 .lock = __SPIN_LOCK_UNLOCKED(atari_irq_controller.lock), 121 .irq_startup = atari_irq_startup,
344 .startup = atari_startup_irq, 122 .irq_shutdown = atari_irq_shutdown,
345 .shutdown = atari_shutdown_irq, 123 .irq_enable = atari_irq_enable,
346 .enable = atari_enable_irq, 124 .irq_disable = atari_irq_disable,
347 .disable = atari_disable_irq,
348}; 125};
349 126
350/* 127/*
@@ -360,8 +137,9 @@ static struct irq_controller atari_irq_controller = {
360 137
361void __init atari_init_IRQ(void) 138void __init atari_init_IRQ(void)
362{ 139{
363 m68k_setup_user_interrupt(VEC_USER, NUM_ATARI_SOURCES - IRQ_USER, NULL); 140 m68k_setup_user_interrupt(VEC_USER, NUM_ATARI_SOURCES - IRQ_USER);
364 m68k_setup_irq_controller(&atari_irq_controller, 1, NUM_ATARI_SOURCES - 1); 141 m68k_setup_irq_controller(&atari_irq_chip, handle_simple_irq, 1,
142 NUM_ATARI_SOURCES - 1);
365 143
366 /* Initialize the MFP(s) */ 144 /* Initialize the MFP(s) */
367 145
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 1edd95095cb4..81286476f740 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -86,7 +86,7 @@ static void bvme6000_get_model(char *model)
86 */ 86 */
87static void __init bvme6000_init_IRQ(void) 87static void __init bvme6000_init_IRQ(void)
88{ 88{
89 m68k_setup_user_interrupt(VEC_USER, 192, NULL); 89 m68k_setup_user_interrupt(VEC_USER, 192);
90} 90}
91 91
92void __init config_bvme6000(void) 92void __init config_bvme6000(void)
diff --git a/arch/m68k/hp300/time.c b/arch/m68k/hp300/time.c
index f6312c7d8727..c87fe69b0728 100644
--- a/arch/m68k/hp300/time.c
+++ b/arch/m68k/hp300/time.c
@@ -70,7 +70,7 @@ void __init hp300_sched_init(irq_handler_t vector)
70 70
71 asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE)); 71 asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE));
72 72
73 if (request_irq(IRQ_AUTO_6, hp300_tick, IRQ_FLG_STD, "timer tick", vector)) 73 if (request_irq(IRQ_AUTO_6, hp300_tick, 0, "timer tick", vector))
74 pr_err("Couldn't register timer interrupt\n"); 74 pr_err("Couldn't register timer interrupt\n");
75 75
76 out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */ 76 out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
diff --git a/arch/m68k/include/asm/hardirq.h b/arch/m68k/include/asm/hardirq.h
index 870e5347155b..db30ed276878 100644
--- a/arch/m68k/include/asm/hardirq.h
+++ b/arch/m68k/include/asm/hardirq.h
@@ -18,6 +18,11 @@
18 18
19#ifdef CONFIG_MMU 19#ifdef CONFIG_MMU
20 20
21static inline void ack_bad_irq(unsigned int irq)
22{
23 pr_crit("unexpected IRQ trap at vector %02x\n", irq);
24}
25
21/* entry.S is sensitive to the offsets of these fields */ 26/* entry.S is sensitive to the offsets of these fields */
22typedef struct { 27typedef struct {
23 unsigned int __softirq_pending; 28 unsigned int __softirq_pending;
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index 69ed0d74d532..6198df5ff245 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -27,11 +27,6 @@
27 27
28#ifdef CONFIG_MMU 28#ifdef CONFIG_MMU
29 29
30#include <linux/linkage.h>
31#include <linux/hardirq.h>
32#include <linux/irqreturn.h>
33#include <linux/spinlock_types.h>
34
35/* 30/*
36 * Interrupt source definitions 31 * Interrupt source definitions
37 * General interrupt sources are the level 1-7. 32 * General interrupt sources are the level 1-7.
@@ -54,10 +49,6 @@
54 49
55#define IRQ_USER 8 50#define IRQ_USER 8
56 51
57extern unsigned int irq_canonicalize(unsigned int irq);
58
59struct pt_regs;
60
61/* 52/*
62 * various flags for request_irq() - the Amiga now uses the standard 53 * various flags for request_irq() - the Amiga now uses the standard
63 * mechanism like all other architectures - IRQF_DISABLED and 54 * mechanism like all other architectures - IRQF_DISABLED and
@@ -71,57 +62,27 @@ struct pt_regs;
71#define IRQ_FLG_STD (0x8000) /* internally used */ 62#define IRQ_FLG_STD (0x8000) /* internally used */
72#endif 63#endif
73 64
74/* 65struct irq_data;
75 * This structure is used to chain together the ISRs for a particular 66struct irq_chip;
76 * interrupt source (if it supports chaining). 67struct irq_desc;
77 */ 68extern unsigned int m68k_irq_startup(struct irq_data *data);
78typedef struct irq_node { 69extern unsigned int m68k_irq_startup_irq(unsigned int irq);
79 irqreturn_t (*handler)(int, void *); 70extern void m68k_irq_shutdown(struct irq_data *data);
80 void *dev_id; 71extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int,
81 struct irq_node *next; 72 struct pt_regs *));
82 unsigned long flags; 73extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt);
83 const char *devname; 74extern void m68k_setup_irq_controller(struct irq_chip *,
84} irq_node_t; 75 void (*handle)(unsigned int irq,
85 76 struct irq_desc *desc),
86/* 77 unsigned int irq, unsigned int cnt);
87 * This structure has only 4 elements for speed reasons
88 */
89struct irq_handler {
90 int (*handler)(int, void *);
91 unsigned long flags;
92 void *dev_id;
93 const char *devname;
94};
95
96struct irq_controller {
97 const char *name;
98 spinlock_t lock;
99 int (*startup)(unsigned int irq);
100 void (*shutdown)(unsigned int irq);
101 void (*enable)(unsigned int irq);
102 void (*disable)(unsigned int irq);
103};
104
105extern int m68k_irq_startup(unsigned int);
106extern void m68k_irq_shutdown(unsigned int);
107
108/*
109 * This function returns a new irq_node_t
110 */
111extern irq_node_t *new_irq_node(void);
112 78
113extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *)); 79extern unsigned int irq_canonicalize(unsigned int irq);
114extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt,
115 void (*handler)(unsigned int, struct pt_regs *));
116extern void m68k_setup_irq_controller(struct irq_controller *, unsigned int, unsigned int);
117
118asmlinkage void m68k_handle_int(unsigned int);
119asmlinkage void __m68k_handle_int(unsigned int, struct pt_regs *);
120 80
121#else 81#else
122#define irq_canonicalize(irq) (irq) 82#define irq_canonicalize(irq) (irq)
123#endif /* CONFIG_MMU */ 83#endif /* CONFIG_MMU */
124 84
125asmlinkage void do_IRQ(int irq, struct pt_regs *regs); 85asmlinkage void do_IRQ(int irq, struct pt_regs *regs);
86extern atomic_t irq_err_count;
126 87
127#endif /* _M68K_IRQ_H_ */ 88#endif /* _M68K_IRQ_H_ */
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index c2a1c5eac1a6..12ebe43b008b 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -12,6 +12,8 @@ extern void mac_reset(void);
12extern void mac_poweroff(void); 12extern void mac_poweroff(void);
13extern void mac_init_IRQ(void); 13extern void mac_init_IRQ(void);
14extern int mac_irq_pending(unsigned int); 14extern int mac_irq_pending(unsigned int);
15extern void mac_irq_enable(struct irq_data *data);
16extern void mac_irq_disable(struct irq_data *data);
15 17
16/* 18/*
17 * Floppy driver magic hook - probably shouldn't be here 19 * Floppy driver magic hook - probably shouldn't be here
diff --git a/arch/m68k/include/asm/q40ints.h b/arch/m68k/include/asm/q40ints.h
index 3d970afb708f..22f12c9eb910 100644
--- a/arch/m68k/include/asm/q40ints.h
+++ b/arch/m68k/include/asm/q40ints.h
@@ -24,6 +24,3 @@
24#define Q40_IRQ10_MASK (1<<5) 24#define Q40_IRQ10_MASK (1<<5)
25#define Q40_IRQ14_MASK (1<<6) 25#define Q40_IRQ14_MASK (1<<6)
26#define Q40_IRQ15_MASK (1<<7) 26#define Q40_IRQ15_MASK (1<<7)
27
28extern unsigned long q40_probe_irq_on (void);
29extern int q40_probe_irq_off (unsigned long irqs);
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index e7f0f2e5ad44..c5696193281a 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -6,16 +6,15 @@ extra-$(CONFIG_MMU) := head.o
6extra-$(CONFIG_SUN3) := sun3-head.o 6extra-$(CONFIG_SUN3) := sun3-head.o
7extra-y += vmlinux.lds 7extra-y += vmlinux.lds
8 8
9obj-y := entry.o m68k_ksyms.o module.o process.o ptrace.o setup.o signal.o \ 9obj-y := entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o setup.o \
10 sys_m68k.o syscalltable.o time.o traps.o 10 signal.o sys_m68k.o syscalltable.o time.o traps.o
11 11
12obj-$(CONFIG_MMU) += ints.o devres.o vectors.o 12obj-$(CONFIG_MMU) += ints.o vectors.o
13devres-$(CONFIG_MMU) = ../../../kernel/irq/devres.o
14 13
15ifndef CONFIG_MMU_SUN3 14ifndef CONFIG_MMU_SUN3
16obj-y += dma.o 15obj-y += dma.o
17endif 16endif
18ifndef CONFIG_MMU 17ifndef CONFIG_MMU
19obj-y += init_task.o irq.o 18obj-y += init_task.o
20endif 19endif
21 20
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S
index bd0ec05263b2..c713f514843d 100644
--- a/arch/m68k/kernel/entry_mm.S
+++ b/arch/m68k/kernel/entry_mm.S
@@ -48,7 +48,7 @@
48.globl sys_fork, sys_clone, sys_vfork 48.globl sys_fork, sys_clone, sys_vfork
49.globl ret_from_interrupt, bad_interrupt 49.globl ret_from_interrupt, bad_interrupt
50.globl auto_irqhandler_fixup 50.globl auto_irqhandler_fixup
51.globl user_irqvec_fixup, user_irqhandler_fixup 51.globl user_irqvec_fixup
52 52
53.text 53.text
54ENTRY(buserr) 54ENTRY(buserr)
@@ -207,7 +207,7 @@ ENTRY(auto_inthandler)
207 movel %sp,%sp@- 207 movel %sp,%sp@-
208 movel %d0,%sp@- | put vector # on stack 208 movel %d0,%sp@- | put vector # on stack
209auto_irqhandler_fixup = . + 2 209auto_irqhandler_fixup = . + 2
210 jsr __m68k_handle_int | process the IRQ 210 jsr do_IRQ | process the IRQ
211 addql #8,%sp | pop parameters off stack 211 addql #8,%sp | pop parameters off stack
212 212
213ret_from_interrupt: 213ret_from_interrupt:
@@ -240,8 +240,7 @@ user_irqvec_fixup = . + 2
240 240
241 movel %sp,%sp@- 241 movel %sp,%sp@-
242 movel %d0,%sp@- | put vector # on stack 242 movel %d0,%sp@- | put vector # on stack
243user_irqhandler_fixup = . + 2 243 jsr do_IRQ | process the IRQ
244 jsr __m68k_handle_int | process the IRQ
245 addql #8,%sp | pop parameters off stack 244 addql #8,%sp | pop parameters off stack
246 245
247 subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) 246 subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
index 761ee0440c99..74fefac00899 100644
--- a/arch/m68k/kernel/ints.c
+++ b/arch/m68k/kernel/ints.c
@@ -4,25 +4,6 @@
4 * This file is subject to the terms and conditions of the GNU General Public 4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file COPYING in the main directory of this archive 5 * License. See the file COPYING in the main directory of this archive
6 * for more details. 6 * for more details.
7 *
8 * 07/03/96: Timer initialization, and thus mach_sched_init(),
9 * removed from request_irq() and moved to init_time().
10 * We should therefore consider renaming our add_isr() and
11 * remove_isr() to request_irq() and free_irq()
12 * respectively, so they are compliant with the other
13 * architectures. /Jes
14 * 11/07/96: Changed all add_/remove_isr() to request_/free_irq() calls.
15 * Removed irq list support, if any machine needs an irq server
16 * it must implement this itself (as it's already done), instead
17 * only default handler are used with mach_default_handler.
18 * request_irq got some flags different from other architectures:
19 * - IRQ_FLG_REPLACE : Replace an existing handler (the default one
20 * can be replaced without this flag)
21 * - IRQ_FLG_LOCK : handler can't be replaced
22 * There are other machine depending flags, see there
23 * If you want to replace a default handler you should know what
24 * you're doing, since it might handle different other irq sources
25 * which must be served /Roman Zippel
26 */ 7 */
27 8
28#include <linux/module.h> 9#include <linux/module.h>
@@ -47,33 +28,22 @@
47#endif 28#endif
48 29
49extern u32 auto_irqhandler_fixup[]; 30extern u32 auto_irqhandler_fixup[];
50extern u32 user_irqhandler_fixup[];
51extern u16 user_irqvec_fixup[]; 31extern u16 user_irqvec_fixup[];
52 32
53/* table for system interrupt handlers */
54static struct irq_node *irq_list[NR_IRQS];
55static struct irq_controller *irq_controller[NR_IRQS];
56static int irq_depth[NR_IRQS];
57
58static int m68k_first_user_vec; 33static int m68k_first_user_vec;
59 34
60static struct irq_controller auto_irq_controller = { 35static struct irq_chip auto_irq_chip = {
61 .name = "auto", 36 .name = "auto",
62 .lock = __SPIN_LOCK_UNLOCKED(auto_irq_controller.lock), 37 .irq_startup = m68k_irq_startup,
63 .startup = m68k_irq_startup, 38 .irq_shutdown = m68k_irq_shutdown,
64 .shutdown = m68k_irq_shutdown,
65}; 39};
66 40
67static struct irq_controller user_irq_controller = { 41static struct irq_chip user_irq_chip = {
68 .name = "user", 42 .name = "user",
69 .lock = __SPIN_LOCK_UNLOCKED(user_irq_controller.lock), 43 .irq_startup = m68k_irq_startup,
70 .startup = m68k_irq_startup, 44 .irq_shutdown = m68k_irq_shutdown,
71 .shutdown = m68k_irq_shutdown,
72}; 45};
73 46
74#define NUM_IRQ_NODES 100
75static irq_node_t nodes[NUM_IRQ_NODES];
76
77/* 47/*
78 * void init_IRQ(void) 48 * void init_IRQ(void)
79 * 49 *
@@ -96,7 +66,7 @@ void __init init_IRQ(void)
96 } 66 }
97 67
98 for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++) 68 for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++)
99 irq_controller[i] = &auto_irq_controller; 69 irq_set_chip_and_handler(i, &auto_irq_chip, handle_simple_irq);
100 70
101 mach_init_IRQ(); 71 mach_init_IRQ();
102} 72}
@@ -106,7 +76,7 @@ void __init init_IRQ(void)
106 * @handler: called from auto vector interrupts 76 * @handler: called from auto vector interrupts
107 * 77 *
108 * setup the handler to be called from auto vector interrupts instead of the 78 * setup the handler to be called from auto vector interrupts instead of the
109 * standard __m68k_handle_int(), it will be called with irq numbers in the range 79 * standard do_IRQ(), it will be called with irq numbers in the range
110 * from IRQ_AUTO_1 - IRQ_AUTO_7. 80 * from IRQ_AUTO_1 - IRQ_AUTO_7.
111 */ 81 */
112void __init m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *)) 82void __init m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *))
@@ -120,217 +90,49 @@ void __init m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_re
120 * m68k_setup_user_interrupt 90 * m68k_setup_user_interrupt
121 * @vec: first user vector interrupt to handle 91 * @vec: first user vector interrupt to handle
122 * @cnt: number of active user vector interrupts 92 * @cnt: number of active user vector interrupts
123 * @handler: called from user vector interrupts
124 * 93 *
125 * setup user vector interrupts, this includes activating the specified range 94 * setup user vector interrupts, this includes activating the specified range
126 * of interrupts, only then these interrupts can be requested (note: this is 95 * of interrupts, only then these interrupts can be requested (note: this is
127 * different from auto vector interrupts). An optional handler can be installed 96 * different from auto vector interrupts).
128 * to be called instead of the default __m68k_handle_int(), it will be called
129 * with irq numbers starting from IRQ_USER.
130 */ 97 */
131void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt, 98void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt)
132 void (*handler)(unsigned int, struct pt_regs *))
133{ 99{
134 int i; 100 int i;
135 101
136 BUG_ON(IRQ_USER + cnt > NR_IRQS); 102 BUG_ON(IRQ_USER + cnt > NR_IRQS);
137 m68k_first_user_vec = vec; 103 m68k_first_user_vec = vec;
138 for (i = 0; i < cnt; i++) 104 for (i = 0; i < cnt; i++)
139 irq_controller[IRQ_USER + i] = &user_irq_controller; 105 irq_set_chip(IRQ_USER + i, &user_irq_chip);
140 *user_irqvec_fixup = vec - IRQ_USER; 106 *user_irqvec_fixup = vec - IRQ_USER;
141 if (handler)
142 *user_irqhandler_fixup = (u32)handler;
143 flush_icache(); 107 flush_icache();
144} 108}
145 109
146/** 110/**
147 * m68k_setup_irq_controller 111 * m68k_setup_irq_controller
148 * @contr: irq controller which controls specified irq 112 * @chip: irq chip which controls specified irq
113 * @handle: flow handler which handles specified irq
149 * @irq: first irq to be managed by the controller 114 * @irq: first irq to be managed by the controller
115 * @cnt: number of irqs to be managed by the controller
150 * 116 *
151 * Change the controller for the specified range of irq, which will be used to 117 * Change the controller for the specified range of irq, which will be used to
152 * manage these irq. auto/user irq already have a default controller, which can 118 * manage these irq. auto/user irq already have a default controller, which can
153 * be changed as well, but the controller probably should use m68k_irq_startup/ 119 * be changed as well, but the controller probably should use m68k_irq_startup/
154 * m68k_irq_shutdown. 120 * m68k_irq_shutdown.
155 */ 121 */
156void m68k_setup_irq_controller(struct irq_controller *contr, unsigned int irq, 122void m68k_setup_irq_controller(struct irq_chip *chip,
123 irq_flow_handler_t handle, unsigned int irq,
157 unsigned int cnt) 124 unsigned int cnt)
158{ 125{
159 int i; 126 int i;
160 127
161 for (i = 0; i < cnt; i++) 128 for (i = 0; i < cnt; i++) {
162 irq_controller[irq + i] = contr; 129 irq_set_chip(irq + i, chip);
163} 130 if (handle)
164 131 irq_set_handler(irq + i, handle);
165irq_node_t *new_irq_node(void)
166{
167 irq_node_t *node;
168 short i;
169
170 for (node = nodes, i = NUM_IRQ_NODES-1; i >= 0; node++, i--) {
171 if (!node->handler) {
172 memset(node, 0, sizeof(*node));
173 return node;
174 }
175 } 132 }
176
177 printk ("new_irq_node: out of nodes\n");
178 return NULL;
179} 133}
180 134
181int setup_irq(unsigned int irq, struct irq_node *node) 135unsigned int m68k_irq_startup_irq(unsigned int irq)
182{
183 struct irq_controller *contr;
184 struct irq_node **prev;
185 unsigned long flags;
186
187 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
188 printk("%s: Incorrect IRQ %d from %s\n",
189 __func__, irq, node->devname);
190 return -ENXIO;
191 }
192
193 spin_lock_irqsave(&contr->lock, flags);
194
195 prev = irq_list + irq;
196 if (*prev) {
197 /* Can't share interrupts unless both agree to */
198 if (!((*prev)->flags & node->flags & IRQF_SHARED)) {
199 spin_unlock_irqrestore(&contr->lock, flags);
200 return -EBUSY;
201 }
202 while (*prev)
203 prev = &(*prev)->next;
204 }
205
206 if (!irq_list[irq]) {
207 if (contr->startup)
208 contr->startup(irq);
209 else
210 contr->enable(irq);
211 }
212 node->next = NULL;
213 *prev = node;
214
215 spin_unlock_irqrestore(&contr->lock, flags);
216
217 return 0;
218}
219
220int request_irq(unsigned int irq,
221 irq_handler_t handler,
222 unsigned long flags, const char *devname, void *dev_id)
223{
224 struct irq_node *node;
225 int res;
226
227 node = new_irq_node();
228 if (!node)
229 return -ENOMEM;
230
231 node->handler = handler;
232 node->flags = flags;
233 node->dev_id = dev_id;
234 node->devname = devname;
235
236 res = setup_irq(irq, node);
237 if (res)
238 node->handler = NULL;
239
240 return res;
241}
242
243EXPORT_SYMBOL(request_irq);
244
245void free_irq(unsigned int irq, void *dev_id)
246{
247 struct irq_controller *contr;
248 struct irq_node **p, *node;
249 unsigned long flags;
250
251 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
252 printk("%s: Incorrect IRQ %d\n", __func__, irq);
253 return;
254 }
255
256 spin_lock_irqsave(&contr->lock, flags);
257
258 p = irq_list + irq;
259 while ((node = *p)) {
260 if (node->dev_id == dev_id)
261 break;
262 p = &node->next;
263 }
264
265 if (node) {
266 *p = node->next;
267 node->handler = NULL;
268 } else
269 printk("%s: Removing probably wrong IRQ %d\n",
270 __func__, irq);
271
272 if (!irq_list[irq]) {
273 if (contr->shutdown)
274 contr->shutdown(irq);
275 else
276 contr->disable(irq);
277 }
278
279 spin_unlock_irqrestore(&contr->lock, flags);
280}
281
282EXPORT_SYMBOL(free_irq);
283
284void enable_irq(unsigned int irq)
285{
286 struct irq_controller *contr;
287 unsigned long flags;
288
289 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
290 printk("%s: Incorrect IRQ %d\n",
291 __func__, irq);
292 return;
293 }
294
295 spin_lock_irqsave(&contr->lock, flags);
296 if (irq_depth[irq]) {
297 if (!--irq_depth[irq]) {
298 if (contr->enable)
299 contr->enable(irq);
300 }
301 } else
302 WARN_ON(1);
303 spin_unlock_irqrestore(&contr->lock, flags);
304}
305
306EXPORT_SYMBOL(enable_irq);
307
308void disable_irq(unsigned int irq)
309{
310 struct irq_controller *contr;
311 unsigned long flags;
312
313 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
314 printk("%s: Incorrect IRQ %d\n",
315 __func__, irq);
316 return;
317 }
318
319 spin_lock_irqsave(&contr->lock, flags);
320 if (!irq_depth[irq]++) {
321 if (contr->disable)
322 contr->disable(irq);
323 }
324 spin_unlock_irqrestore(&contr->lock, flags);
325}
326
327EXPORT_SYMBOL(disable_irq);
328
329void disable_irq_nosync(unsigned int irq) __attribute__((alias("disable_irq")));
330
331EXPORT_SYMBOL(disable_irq_nosync);
332
333int m68k_irq_startup(unsigned int irq)
334{ 136{
335 if (irq <= IRQ_AUTO_7) 137 if (irq <= IRQ_AUTO_7)
336 vectors[VEC_SPUR + irq] = auto_inthandler; 138 vectors[VEC_SPUR + irq] = auto_inthandler;
@@ -339,41 +141,21 @@ int m68k_irq_startup(unsigned int irq)
339 return 0; 141 return 0;
340} 142}
341 143
342void m68k_irq_shutdown(unsigned int irq) 144unsigned int m68k_irq_startup(struct irq_data *data)
343{ 145{
344 if (irq <= IRQ_AUTO_7) 146 return m68k_irq_startup_irq(data->irq);
345 vectors[VEC_SPUR + irq] = bad_inthandler;
346 else
347 vectors[m68k_first_user_vec + irq - IRQ_USER] = bad_inthandler;
348} 147}
349 148
350 149void m68k_irq_shutdown(struct irq_data *data)
351/*
352 * Do we need these probe functions on the m68k?
353 *
354 * ... may be useful with ISA devices
355 */
356unsigned long probe_irq_on (void)
357{ 150{
358#ifdef CONFIG_Q40 151 unsigned int irq = data->irq;
359 if (MACH_IS_Q40)
360 return q40_probe_irq_on();
361#endif
362 return 0;
363}
364 152
365EXPORT_SYMBOL(probe_irq_on); 153 if (irq <= IRQ_AUTO_7)
366 154 vectors[VEC_SPUR + irq] = bad_inthandler;
367int probe_irq_off (unsigned long irqs) 155 else
368{ 156 vectors[m68k_first_user_vec + irq - IRQ_USER] = bad_inthandler;
369#ifdef CONFIG_Q40
370 if (MACH_IS_Q40)
371 return q40_probe_irq_off(irqs);
372#endif
373 return 0;
374} 157}
375 158
376EXPORT_SYMBOL(probe_irq_off);
377 159
378unsigned int irq_canonicalize(unsigned int irq) 160unsigned int irq_canonicalize(unsigned int irq)
379{ 161{
@@ -386,52 +168,9 @@ unsigned int irq_canonicalize(unsigned int irq)
386 168
387EXPORT_SYMBOL(irq_canonicalize); 169EXPORT_SYMBOL(irq_canonicalize);
388 170
389asmlinkage void m68k_handle_int(unsigned int irq)
390{
391 struct irq_node *node;
392 kstat_cpu(0).irqs[irq]++;
393 node = irq_list[irq];
394 do {
395 node->handler(irq, node->dev_id);
396 node = node->next;
397 } while (node);
398}
399
400asmlinkage void __m68k_handle_int(unsigned int irq, struct pt_regs *regs)
401{
402 struct pt_regs *old_regs;
403 old_regs = set_irq_regs(regs);
404 m68k_handle_int(irq);
405 set_irq_regs(old_regs);
406}
407 171
408asmlinkage void handle_badint(struct pt_regs *regs) 172asmlinkage void handle_badint(struct pt_regs *regs)
409{ 173{
410 kstat_cpu(0).irqs[0]++; 174 atomic_inc(&irq_err_count);
411 printk("unexpected interrupt from %u\n", regs->vector); 175 pr_warn("unexpected interrupt from %u\n", regs->vector);
412}
413
414int show_interrupts(struct seq_file *p, void *v)
415{
416 struct irq_controller *contr;
417 struct irq_node *node;
418 int i = *(loff_t *) v;
419
420 /* autovector interrupts */
421 if (irq_list[i]) {
422 contr = irq_controller[i];
423 node = irq_list[i];
424 seq_printf(p, "%-8s %3u: %10u %s", contr->name, i, kstat_cpu(0).irqs[i], node->devname);
425 while ((node = node->next))
426 seq_printf(p, ", %s", node->devname);
427 seq_puts(p, "\n");
428 }
429 return 0;
430}
431
432#ifdef CONFIG_PROC_FS
433void init_irq_proc(void)
434{
435 /* Insert /proc/irq driver here */
436} 176}
437#endif
diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c
index 2a96bebd8969..b403924a1cad 100644
--- a/arch/m68k/mac/baboon.c
+++ b/arch/m68k/mac/baboon.c
@@ -11,6 +11,7 @@
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/irq.h>
14 15
15#include <asm/traps.h> 16#include <asm/traps.h>
16#include <asm/bootinfo.h> 17#include <asm/bootinfo.h>
@@ -20,9 +21,6 @@
20 21
21/* #define DEBUG_IRQS */ 22/* #define DEBUG_IRQS */
22 23
23extern void mac_enable_irq(unsigned int);
24extern void mac_disable_irq(unsigned int);
25
26int baboon_present; 24int baboon_present;
27static volatile struct baboon *baboon; 25static volatile struct baboon *baboon;
28static unsigned char baboon_disabled; 26static unsigned char baboon_disabled;
@@ -53,7 +51,7 @@ void __init baboon_init(void)
53 * Baboon interrupt handler. This works a lot like a VIA. 51 * Baboon interrupt handler. This works a lot like a VIA.
54 */ 52 */
55 53
56static irqreturn_t baboon_irq(int irq, void *dev_id) 54static void baboon_irq(unsigned int irq, struct irq_desc *desc)
57{ 55{
58 int irq_bit, irq_num; 56 int irq_bit, irq_num;
59 unsigned char events; 57 unsigned char events;
@@ -64,15 +62,16 @@ static irqreturn_t baboon_irq(int irq, void *dev_id)
64 (uint) baboon->mb_status); 62 (uint) baboon->mb_status);
65#endif 63#endif
66 64
67 if (!(events = baboon->mb_ifr & 0x07)) 65 events = baboon->mb_ifr & 0x07;
68 return IRQ_NONE; 66 if (!events)
67 return;
69 68
70 irq_num = IRQ_BABOON_0; 69 irq_num = IRQ_BABOON_0;
71 irq_bit = 1; 70 irq_bit = 1;
72 do { 71 do {
73 if (events & irq_bit) { 72 if (events & irq_bit) {
74 baboon->mb_ifr &= ~irq_bit; 73 baboon->mb_ifr &= ~irq_bit;
75 m68k_handle_int(irq_num); 74 generic_handle_irq(irq_num);
76 } 75 }
77 irq_bit <<= 1; 76 irq_bit <<= 1;
78 irq_num++; 77 irq_num++;
@@ -82,7 +81,6 @@ static irqreturn_t baboon_irq(int irq, void *dev_id)
82 /* for now we need to smash all interrupts */ 81 /* for now we need to smash all interrupts */
83 baboon->mb_ifr &= ~events; 82 baboon->mb_ifr &= ~events;
84#endif 83#endif
85 return IRQ_HANDLED;
86} 84}
87 85
88/* 86/*
@@ -92,8 +90,7 @@ static irqreturn_t baboon_irq(int irq, void *dev_id)
92void __init baboon_register_interrupts(void) 90void __init baboon_register_interrupts(void)
93{ 91{
94 baboon_disabled = 0; 92 baboon_disabled = 0;
95 if (request_irq(IRQ_NUBUS_C, baboon_irq, 0, "baboon", (void *)baboon)) 93 irq_set_chained_handler(IRQ_NUBUS_C, baboon_irq);
96 pr_err("Couldn't register baboon interrupt\n");
97} 94}
98 95
99/* 96/*
@@ -111,7 +108,7 @@ void baboon_irq_enable(int irq)
111 108
112 baboon_disabled &= ~(1 << irq_idx); 109 baboon_disabled &= ~(1 << irq_idx);
113 if (!baboon_disabled) 110 if (!baboon_disabled)
114 mac_enable_irq(IRQ_NUBUS_C); 111 mac_irq_enable(irq_get_irq_data(IRQ_NUBUS_C));
115} 112}
116 113
117void baboon_irq_disable(int irq) 114void baboon_irq_disable(int irq)
@@ -124,7 +121,7 @@ void baboon_irq_disable(int irq)
124 121
125 baboon_disabled |= 1 << irq_idx; 122 baboon_disabled |= 1 << irq_idx;
126 if (baboon_disabled) 123 if (baboon_disabled)
127 mac_disable_irq(IRQ_NUBUS_C); 124 mac_irq_disable(irq_get_irq_data(IRQ_NUBUS_C));
128} 125}
129 126
130void baboon_irq_clear(int irq) 127void baboon_irq_clear(int irq)
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index 1ad4e9d80eba..a5462cc0bfd6 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -305,15 +305,13 @@ void __init iop_register_interrupts(void)
305{ 305{
306 if (iop_ism_present) { 306 if (iop_ism_present) {
307 if (oss_present) { 307 if (oss_present) {
308 if (request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq, 308 if (request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq, 0,
309 IRQ_FLG_LOCK, "ISM IOP", 309 "ISM IOP", (void *)IOP_NUM_ISM))
310 (void *) IOP_NUM_ISM))
311 pr_err("Couldn't register ISM IOP interrupt\n"); 310 pr_err("Couldn't register ISM IOP interrupt\n");
312 oss_irq_enable(IRQ_MAC_ADB); 311 oss_irq_enable(IRQ_MAC_ADB);
313 } else { 312 } else {
314 if (request_irq(IRQ_VIA2_0, iop_ism_irq, 313 if (request_irq(IRQ_VIA2_0, iop_ism_irq, 0, "ISM IOP",
315 IRQ_FLG_LOCK|IRQ_FLG_FAST, "ISM IOP", 314 (void *)IOP_NUM_ISM))
316 (void *) IOP_NUM_ISM))
317 pr_err("Couldn't register ISM IOP interrupt\n"); 315 pr_err("Couldn't register ISM IOP interrupt\n");
318 } 316 }
319 if (!iop_alive(iop_base[IOP_NUM_ISM])) { 317 if (!iop_alive(iop_base[IOP_NUM_ISM])) {
diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
index f92190c159b4..ba220b70ab8c 100644
--- a/arch/m68k/mac/macints.c
+++ b/arch/m68k/mac/macints.c
@@ -190,14 +190,10 @@ irqreturn_t mac_debug_handler(int, void *);
190 190
191/* #define DEBUG_MACINTS */ 191/* #define DEBUG_MACINTS */
192 192
193void mac_enable_irq(unsigned int irq); 193static struct irq_chip mac_irq_chip = {
194void mac_disable_irq(unsigned int irq);
195
196static struct irq_controller mac_irq_controller = {
197 .name = "mac", 194 .name = "mac",
198 .lock = __SPIN_LOCK_UNLOCKED(mac_irq_controller.lock), 195 .irq_enable = mac_irq_enable,
199 .enable = mac_enable_irq, 196 .irq_disable = mac_irq_disable,
200 .disable = mac_disable_irq,
201}; 197};
202 198
203void __init mac_init_IRQ(void) 199void __init mac_init_IRQ(void)
@@ -205,7 +201,7 @@ void __init mac_init_IRQ(void)
205#ifdef DEBUG_MACINTS 201#ifdef DEBUG_MACINTS
206 printk("mac_init_IRQ(): Setting things up...\n"); 202 printk("mac_init_IRQ(): Setting things up...\n");
207#endif 203#endif
208 m68k_setup_irq_controller(&mac_irq_controller, IRQ_USER, 204 m68k_setup_irq_controller(&mac_irq_chip, handle_simple_irq, IRQ_USER,
209 NUM_MAC_SOURCES - IRQ_USER); 205 NUM_MAC_SOURCES - IRQ_USER);
210 /* Make sure the SONIC interrupt is cleared or things get ugly */ 206 /* Make sure the SONIC interrupt is cleared or things get ugly */
211#ifdef SHUTUP_SONIC 207#ifdef SHUTUP_SONIC
@@ -241,16 +237,17 @@ void __init mac_init_IRQ(void)
241} 237}
242 238
243/* 239/*
244 * mac_enable_irq - enable an interrupt source 240 * mac_irq_enable - enable an interrupt source
245 * mac_disable_irq - disable an interrupt source 241 * mac_irq_disable - disable an interrupt source
246 * mac_clear_irq - clears a pending interrupt 242 * mac_clear_irq - clears a pending interrupt
247 * mac_pending_irq - Returns the pending status of an IRQ (nonzero = pending) 243 * mac_irq_pending - returns the pending status of an IRQ (nonzero = pending)
248 * 244 *
249 * These routines are just dispatchers to the VIA/OSS/PSC routines. 245 * These routines are just dispatchers to the VIA/OSS/PSC routines.
250 */ 246 */
251 247
252void mac_enable_irq(unsigned int irq) 248void mac_irq_enable(struct irq_data *data)
253{ 249{
250 int irq = data->irq;
254 int irq_src = IRQ_SRC(irq); 251 int irq_src = IRQ_SRC(irq);
255 252
256 switch(irq_src) { 253 switch(irq_src) {
@@ -283,8 +280,9 @@ void mac_enable_irq(unsigned int irq)
283 } 280 }
284} 281}
285 282
286void mac_disable_irq(unsigned int irq) 283void mac_irq_disable(struct irq_data *data)
287{ 284{
285 int irq = data->irq;
288 int irq_src = IRQ_SRC(irq); 286 int irq_src = IRQ_SRC(irq);
289 287
290 switch(irq_src) { 288 switch(irq_src) {
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index a9c0f5ab4cc0..a4c82dab9ff1 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -19,6 +19,7 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/irq.h>
22 23
23#include <asm/bootinfo.h> 24#include <asm/bootinfo.h>
24#include <asm/macintosh.h> 25#include <asm/macintosh.h>
@@ -29,10 +30,7 @@
29int oss_present; 30int oss_present;
30volatile struct mac_oss *oss; 31volatile struct mac_oss *oss;
31 32
32static irqreturn_t oss_irq(int, void *); 33extern void via1_irq(unsigned int irq, struct irq_desc *desc);
33static irqreturn_t oss_nubus_irq(int, void *);
34
35extern irqreturn_t via1_irq(int, void *);
36 34
37/* 35/*
38 * Initialize the OSS 36 * Initialize the OSS
@@ -60,26 +58,6 @@ void __init oss_init(void)
60} 58}
61 59
62/* 60/*
63 * Register the OSS and NuBus interrupt dispatchers.
64 */
65
66void __init oss_register_interrupts(void)
67{
68 if (request_irq(OSS_IRQLEV_SCSI, oss_irq, IRQ_FLG_LOCK,
69 "scsi", (void *) oss))
70 pr_err("Couldn't register %s interrupt\n", "scsi");
71 if (request_irq(OSS_IRQLEV_NUBUS, oss_nubus_irq, IRQ_FLG_LOCK,
72 "nubus", (void *) oss))
73 pr_err("Couldn't register %s interrupt\n", "nubus");
74 if (request_irq(OSS_IRQLEV_SOUND, oss_irq, IRQ_FLG_LOCK,
75 "sound", (void *) oss))
76 pr_err("Couldn't register %s interrupt\n", "sound");
77 if (request_irq(OSS_IRQLEV_VIA1, via1_irq, IRQ_FLG_LOCK,
78 "via1", (void *) via1))
79 pr_err("Couldn't register %s interrupt\n", "via1");
80}
81
82/*
83 * Initialize OSS for Nubus access 61 * Initialize OSS for Nubus access
84 */ 62 */
85 63
@@ -92,17 +70,17 @@ void __init oss_nubus_init(void)
92 * and SCSI; everything else is routed to its own autovector IRQ. 70 * and SCSI; everything else is routed to its own autovector IRQ.
93 */ 71 */
94 72
95static irqreturn_t oss_irq(int irq, void *dev_id) 73static void oss_irq(unsigned int irq, struct irq_desc *desc)
96{ 74{
97 int events; 75 int events;
98 76
99 events = oss->irq_pending & (OSS_IP_SOUND|OSS_IP_SCSI); 77 events = oss->irq_pending & (OSS_IP_SOUND|OSS_IP_SCSI);
100 if (!events) 78 if (!events)
101 return IRQ_NONE; 79 return;
102 80
103#ifdef DEBUG_IRQS 81#ifdef DEBUG_IRQS
104 if ((console_loglevel == 10) && !(events & OSS_IP_SCSI)) { 82 if ((console_loglevel == 10) && !(events & OSS_IP_SCSI)) {
105 printk("oss_irq: irq %d events = 0x%04X\n", irq, 83 printk("oss_irq: irq %u events = 0x%04X\n", irq,
106 (int) oss->irq_pending); 84 (int) oss->irq_pending);
107 } 85 }
108#endif 86#endif
@@ -113,11 +91,10 @@ static irqreturn_t oss_irq(int irq, void *dev_id)
113 /* FIXME: call sound handler */ 91 /* FIXME: call sound handler */
114 } else if (events & OSS_IP_SCSI) { 92 } else if (events & OSS_IP_SCSI) {
115 oss->irq_pending &= ~OSS_IP_SCSI; 93 oss->irq_pending &= ~OSS_IP_SCSI;
116 m68k_handle_int(IRQ_MAC_SCSI); 94 generic_handle_irq(IRQ_MAC_SCSI);
117 } else { 95 } else {
118 /* FIXME: error check here? */ 96 /* FIXME: error check here? */
119 } 97 }
120 return IRQ_HANDLED;
121} 98}
122 99
123/* 100/*
@@ -126,13 +103,13 @@ static irqreturn_t oss_irq(int irq, void *dev_id)
126 * Unlike the VIA/RBV this is on its own autovector interrupt level. 103 * Unlike the VIA/RBV this is on its own autovector interrupt level.
127 */ 104 */
128 105
129static irqreturn_t oss_nubus_irq(int irq, void *dev_id) 106static void oss_nubus_irq(unsigned int irq, struct irq_desc *desc)
130{ 107{
131 int events, irq_bit, i; 108 int events, irq_bit, i;
132 109
133 events = oss->irq_pending & OSS_IP_NUBUS; 110 events = oss->irq_pending & OSS_IP_NUBUS;
134 if (!events) 111 if (!events)
135 return IRQ_NONE; 112 return;
136 113
137#ifdef DEBUG_NUBUS_INT 114#ifdef DEBUG_NUBUS_INT
138 if (console_loglevel > 7) { 115 if (console_loglevel > 7) {
@@ -148,10 +125,21 @@ static irqreturn_t oss_nubus_irq(int irq, void *dev_id)
148 irq_bit >>= 1; 125 irq_bit >>= 1;
149 if (events & irq_bit) { 126 if (events & irq_bit) {
150 oss->irq_pending &= ~irq_bit; 127 oss->irq_pending &= ~irq_bit;
151 m68k_handle_int(NUBUS_SOURCE_BASE + i); 128 generic_handle_irq(NUBUS_SOURCE_BASE + i);
152 } 129 }
153 } while(events & (irq_bit - 1)); 130 } while(events & (irq_bit - 1));
154 return IRQ_HANDLED; 131}
132
133/*
134 * Register the OSS and NuBus interrupt dispatchers.
135 */
136
137void __init oss_register_interrupts(void)
138{
139 irq_set_chained_handler(OSS_IRQLEV_SCSI, oss_irq);
140 irq_set_chained_handler(OSS_IRQLEV_NUBUS, oss_nubus_irq);
141 irq_set_chained_handler(OSS_IRQLEV_SOUND, oss_irq);
142 irq_set_chained_handler(OSS_IRQLEV_VIA1, via1_irq);
155} 143}
156 144
157/* 145/*
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index a4c3eb60706e..e6c2d20f328d 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -18,6 +18,7 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/irq.h>
21 22
22#include <asm/traps.h> 23#include <asm/traps.h>
23#include <asm/bootinfo.h> 24#include <asm/bootinfo.h>
@@ -30,8 +31,6 @@
30int psc_present; 31int psc_present;
31volatile __u8 *psc; 32volatile __u8 *psc;
32 33
33irqreturn_t psc_irq(int, void *);
34
35/* 34/*
36 * Debugging dump, used in various places to see what's going on. 35 * Debugging dump, used in various places to see what's going on.
37 */ 36 */
@@ -112,52 +111,52 @@ void __init psc_init(void)
112} 111}
113 112
114/* 113/*
115 * Register the PSC interrupt dispatchers for autovector interrupts 3-6.
116 */
117
118void __init psc_register_interrupts(void)
119{
120 if (request_irq(IRQ_AUTO_3, psc_irq, 0, "psc3", (void *) 0x30))
121 pr_err("Couldn't register psc%d interrupt\n", 3);
122 if (request_irq(IRQ_AUTO_4, psc_irq, 0, "psc4", (void *) 0x40))
123 pr_err("Couldn't register psc%d interrupt\n", 4);
124 if (request_irq(IRQ_AUTO_5, psc_irq, 0, "psc5", (void *) 0x50))
125 pr_err("Couldn't register psc%d interrupt\n", 5);
126 if (request_irq(IRQ_AUTO_6, psc_irq, 0, "psc6", (void *) 0x60))
127 pr_err("Couldn't register psc%d interrupt\n", 6);
128}
129
130/*
131 * PSC interrupt handler. It's a lot like the VIA interrupt handler. 114 * PSC interrupt handler. It's a lot like the VIA interrupt handler.
132 */ 115 */
133 116
134irqreturn_t psc_irq(int irq, void *dev_id) 117static void psc_irq(unsigned int irq, struct irq_desc *desc)
135{ 118{
136 int pIFR = pIFRbase + ((int) dev_id); 119 unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc);
137 int pIER = pIERbase + ((int) dev_id); 120 int pIFR = pIFRbase + offset;
121 int pIER = pIERbase + offset;
138 int irq_num; 122 int irq_num;
139 unsigned char irq_bit, events; 123 unsigned char irq_bit, events;
140 124
141#ifdef DEBUG_IRQS 125#ifdef DEBUG_IRQS
142 printk("psc_irq: irq %d pIFR = 0x%02X pIER = 0x%02X\n", 126 printk("psc_irq: irq %u pIFR = 0x%02X pIER = 0x%02X\n",
143 irq, (int) psc_read_byte(pIFR), (int) psc_read_byte(pIER)); 127 irq, (int) psc_read_byte(pIFR), (int) psc_read_byte(pIER));
144#endif 128#endif
145 129
146 events = psc_read_byte(pIFR) & psc_read_byte(pIER) & 0xF; 130 events = psc_read_byte(pIFR) & psc_read_byte(pIER) & 0xF;
147 if (!events) 131 if (!events)
148 return IRQ_NONE; 132 return;
149 133
150 irq_num = irq << 3; 134 irq_num = irq << 3;
151 irq_bit = 1; 135 irq_bit = 1;
152 do { 136 do {
153 if (events & irq_bit) { 137 if (events & irq_bit) {
154 psc_write_byte(pIFR, irq_bit); 138 psc_write_byte(pIFR, irq_bit);
155 m68k_handle_int(irq_num); 139 generic_handle_irq(irq_num);
156 } 140 }
157 irq_num++; 141 irq_num++;
158 irq_bit <<= 1; 142 irq_bit <<= 1;
159 } while (events >= irq_bit); 143 } while (events >= irq_bit);
160 return IRQ_HANDLED; 144}
145
146/*
147 * Register the PSC interrupt dispatchers for autovector interrupts 3-6.
148 */
149
150void __init psc_register_interrupts(void)
151{
152 irq_set_chained_handler(IRQ_AUTO_3, psc_irq);
153 irq_set_handler_data(IRQ_AUTO_3, (void *)0x30);
154 irq_set_chained_handler(IRQ_AUTO_4, psc_irq);
155 irq_set_handler_data(IRQ_AUTO_4, (void *)0x40);
156 irq_set_chained_handler(IRQ_AUTO_5, psc_irq);
157 irq_set_handler_data(IRQ_AUTO_5, (void *)0x50);
158 irq_set_chained_handler(IRQ_AUTO_6, psc_irq);
159 irq_set_handler_data(IRQ_AUTO_6, (void *)0x60);
161} 160}
162 161
163void psc_irq_enable(int irq) { 162void psc_irq_enable(int irq) {
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index e71166daec6a..f1600ad26621 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -28,6 +28,7 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/irq.h>
31 32
32#include <asm/bootinfo.h> 33#include <asm/bootinfo.h>
33#include <asm/macintosh.h> 34#include <asm/macintosh.h>
@@ -77,9 +78,6 @@ static int gIER,gIFR,gBufA,gBufB;
77static u8 nubus_disabled; 78static u8 nubus_disabled;
78 79
79void via_debug_dump(void); 80void via_debug_dump(void);
80irqreturn_t via1_irq(int, void *);
81irqreturn_t via2_irq(int, void *);
82irqreturn_t via_nubus_irq(int, void *);
83void via_irq_enable(int irq); 81void via_irq_enable(int irq);
84void via_irq_disable(int irq); 82void via_irq_disable(int irq);
85void via_irq_clear(int irq); 83void via_irq_clear(int irq);
@@ -281,40 +279,11 @@ void __init via_init_clock(irq_handler_t func)
281 via1[vT1CL] = MAC_CLOCK_LOW; 279 via1[vT1CL] = MAC_CLOCK_LOW;
282 via1[vT1CH] = MAC_CLOCK_HIGH; 280 via1[vT1CH] = MAC_CLOCK_HIGH;
283 281
284 if (request_irq(IRQ_MAC_TIMER_1, func, IRQ_FLG_LOCK, "timer", func)) 282 if (request_irq(IRQ_MAC_TIMER_1, func, 0, "timer", func))
285 pr_err("Couldn't register %s interrupt\n", "timer"); 283 pr_err("Couldn't register %s interrupt\n", "timer");
286} 284}
287 285
288/* 286/*
289 * Register the interrupt dispatchers for VIA or RBV machines only.
290 */
291
292void __init via_register_interrupts(void)
293{
294 if (via_alt_mapping) {
295 if (request_irq(IRQ_AUTO_1, via1_irq,
296 IRQ_FLG_LOCK|IRQ_FLG_FAST, "software",
297 (void *) via1))
298 pr_err("Couldn't register %s interrupt\n", "software");
299 if (request_irq(IRQ_AUTO_6, via1_irq,
300 IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
301 (void *) via1))
302 pr_err("Couldn't register %s interrupt\n", "via1");
303 } else {
304 if (request_irq(IRQ_AUTO_1, via1_irq,
305 IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
306 (void *) via1))
307 pr_err("Couldn't register %s interrupt\n", "via1");
308 }
309 if (request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
310 "via2", (void *) via2))
311 pr_err("Couldn't register %s interrupt\n", "via2");
312 if (request_irq(IRQ_MAC_NUBUS, via_nubus_irq,
313 IRQ_FLG_LOCK|IRQ_FLG_FAST, "nubus", (void *) via2))
314 pr_err("Couldn't register %s interrupt\n", "nubus");
315}
316
317/*
318 * Debugging dump, used in various places to see what's going on. 287 * Debugging dump, used in various places to see what's going on.
319 */ 288 */
320 289
@@ -446,48 +415,46 @@ void __init via_nubus_init(void)
446 * via6522.c :-), disable/pending masks added. 415 * via6522.c :-), disable/pending masks added.
447 */ 416 */
448 417
449irqreturn_t via1_irq(int irq, void *dev_id) 418void via1_irq(unsigned int irq, struct irq_desc *desc)
450{ 419{
451 int irq_num; 420 int irq_num;
452 unsigned char irq_bit, events; 421 unsigned char irq_bit, events;
453 422
454 events = via1[vIFR] & via1[vIER] & 0x7F; 423 events = via1[vIFR] & via1[vIER] & 0x7F;
455 if (!events) 424 if (!events)
456 return IRQ_NONE; 425 return;
457 426
458 irq_num = VIA1_SOURCE_BASE; 427 irq_num = VIA1_SOURCE_BASE;
459 irq_bit = 1; 428 irq_bit = 1;
460 do { 429 do {
461 if (events & irq_bit) { 430 if (events & irq_bit) {
462 via1[vIFR] = irq_bit; 431 via1[vIFR] = irq_bit;
463 m68k_handle_int(irq_num); 432 generic_handle_irq(irq_num);
464 } 433 }
465 ++irq_num; 434 ++irq_num;
466 irq_bit <<= 1; 435 irq_bit <<= 1;
467 } while (events >= irq_bit); 436 } while (events >= irq_bit);
468 return IRQ_HANDLED;
469} 437}
470 438
471irqreturn_t via2_irq(int irq, void *dev_id) 439static void via2_irq(unsigned int irq, struct irq_desc *desc)
472{ 440{
473 int irq_num; 441 int irq_num;
474 unsigned char irq_bit, events; 442 unsigned char irq_bit, events;
475 443
476 events = via2[gIFR] & via2[gIER] & 0x7F; 444 events = via2[gIFR] & via2[gIER] & 0x7F;
477 if (!events) 445 if (!events)
478 return IRQ_NONE; 446 return;
479 447
480 irq_num = VIA2_SOURCE_BASE; 448 irq_num = VIA2_SOURCE_BASE;
481 irq_bit = 1; 449 irq_bit = 1;
482 do { 450 do {
483 if (events & irq_bit) { 451 if (events & irq_bit) {
484 via2[gIFR] = irq_bit | rbv_clear; 452 via2[gIFR] = irq_bit | rbv_clear;
485 m68k_handle_int(irq_num); 453 generic_handle_irq(irq_num);
486 } 454 }
487 ++irq_num; 455 ++irq_num;
488 irq_bit <<= 1; 456 irq_bit <<= 1;
489 } while (events >= irq_bit); 457 } while (events >= irq_bit);
490 return IRQ_HANDLED;
491} 458}
492 459
493/* 460/*
@@ -495,7 +462,7 @@ irqreturn_t via2_irq(int irq, void *dev_id)
495 * VIA2 dispatcher as a fast interrupt handler. 462 * VIA2 dispatcher as a fast interrupt handler.
496 */ 463 */
497 464
498irqreturn_t via_nubus_irq(int irq, void *dev_id) 465void via_nubus_irq(unsigned int irq, struct irq_desc *desc)
499{ 466{
500 int slot_irq; 467 int slot_irq;
501 unsigned char slot_bit, events; 468 unsigned char slot_bit, events;
@@ -506,7 +473,7 @@ irqreturn_t via_nubus_irq(int irq, void *dev_id)
506 else 473 else
507 events &= ~via2[vDirA]; 474 events &= ~via2[vDirA];
508 if (!events) 475 if (!events)
509 return IRQ_NONE; 476 return;
510 477
511 do { 478 do {
512 slot_irq = IRQ_NUBUS_F; 479 slot_irq = IRQ_NUBUS_F;
@@ -514,7 +481,7 @@ irqreturn_t via_nubus_irq(int irq, void *dev_id)
514 do { 481 do {
515 if (events & slot_bit) { 482 if (events & slot_bit) {
516 events &= ~slot_bit; 483 events &= ~slot_bit;
517 m68k_handle_int(slot_irq); 484 generic_handle_irq(slot_irq);
518 } 485 }
519 --slot_irq; 486 --slot_irq;
520 slot_bit >>= 1; 487 slot_bit >>= 1;
@@ -528,7 +495,24 @@ irqreturn_t via_nubus_irq(int irq, void *dev_id)
528 else 495 else
529 events &= ~via2[vDirA]; 496 events &= ~via2[vDirA];
530 } while (events); 497 } while (events);
531 return IRQ_HANDLED; 498}
499
500/*
501 * Register the interrupt dispatchers for VIA or RBV machines only.
502 */
503
504void __init via_register_interrupts(void)
505{
506 if (via_alt_mapping) {
507 /* software interrupt */
508 irq_set_chained_handler(IRQ_AUTO_1, via1_irq);
509 /* via1 interrupt */
510 irq_set_chained_handler(IRQ_AUTO_6, via1_irq);
511 } else {
512 irq_set_chained_handler(IRQ_AUTO_1, via1_irq);
513 }
514 irq_set_chained_handler(IRQ_AUTO_2, via2_irq);
515 irq_set_chained_handler(IRQ_MAC_NUBUS, via_nubus_irq);
532} 516}
533 517
534void via_irq_enable(int irq) { 518void via_irq_enable(int irq) {
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index 6cb9c3a9b6c9..5de924ef42ed 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -81,7 +81,7 @@ static void mvme147_get_model(char *model)
81 81
82void __init mvme147_init_IRQ(void) 82void __init mvme147_init_IRQ(void)
83{ 83{
84 m68k_setup_user_interrupt(VEC_USER, 192, NULL); 84 m68k_setup_user_interrupt(VEC_USER, 192);
85} 85}
86 86
87void __init config_mvme147(void) 87void __init config_mvme147(void)
@@ -114,8 +114,7 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
114void mvme147_sched_init (irq_handler_t timer_routine) 114void mvme147_sched_init (irq_handler_t timer_routine)
115{ 115{
116 tick_handler = timer_routine; 116 tick_handler = timer_routine;
117 if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, IRQ_FLG_REPLACE, 117 if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, 0, "timer 1", NULL))
118 "timer 1", NULL))
119 pr_err("Couldn't register timer interrupt\n"); 118 pr_err("Couldn't register timer interrupt\n");
120 119
121 /* Init the clock with a value */ 120 /* Init the clock with a value */
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 0b28e2621653..31a66d99cbca 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -117,7 +117,7 @@ static void mvme16x_get_hardware_list(struct seq_file *m)
117 117
118static void __init mvme16x_init_IRQ (void) 118static void __init mvme16x_init_IRQ (void)
119{ 119{
120 m68k_setup_user_interrupt(VEC_USER, 192, NULL); 120 m68k_setup_user_interrupt(VEC_USER, 192);
121} 121}
122 122
123#define pcc2chip ((volatile u_char *)0xfff42000) 123#define pcc2chip ((volatile u_char *)0xfff42000)
diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c
index 9f0e3d59bf92..2b888491f29a 100644
--- a/arch/m68k/q40/q40ints.c
+++ b/arch/m68k/q40/q40ints.c
@@ -15,10 +15,10 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/irq.h>
18 19
19#include <asm/ptrace.h> 20#include <asm/ptrace.h>
20#include <asm/system.h> 21#include <asm/system.h>
21#include <asm/irq.h>
22#include <asm/traps.h> 22#include <asm/traps.h>
23 23
24#include <asm/q40_master.h> 24#include <asm/q40_master.h>
@@ -35,35 +35,36 @@
35*/ 35*/
36 36
37static void q40_irq_handler(unsigned int, struct pt_regs *fp); 37static void q40_irq_handler(unsigned int, struct pt_regs *fp);
38static void q40_enable_irq(unsigned int); 38static void q40_irq_enable(struct irq_data *data);
39static void q40_disable_irq(unsigned int); 39static void q40_irq_disable(struct irq_data *data);
40 40
41unsigned short q40_ablecount[35]; 41unsigned short q40_ablecount[35];
42unsigned short q40_state[35]; 42unsigned short q40_state[35];
43 43
44static int q40_irq_startup(unsigned int irq) 44static unsigned int q40_irq_startup(struct irq_data *data)
45{ 45{
46 unsigned int irq = data->irq;
47
46 /* test for ISA ints not implemented by HW */ 48 /* test for ISA ints not implemented by HW */
47 switch (irq) { 49 switch (irq) {
48 case 1: case 2: case 8: case 9: 50 case 1: case 2: case 8: case 9:
49 case 11: case 12: case 13: 51 case 11: case 12: case 13:
50 printk("%s: ISA IRQ %d not implemented by HW\n", __func__, irq); 52 printk("%s: ISA IRQ %d not implemented by HW\n", __func__, irq);
51 return -ENXIO; 53 /* FIXME return -ENXIO; */
52 } 54 }
53 return 0; 55 return 0;
54} 56}
55 57
56static void q40_irq_shutdown(unsigned int irq) 58static void q40_irq_shutdown(struct irq_data *data)
57{ 59{
58} 60}
59 61
60static struct irq_controller q40_irq_controller = { 62static struct irq_chip q40_irq_chip = {
61 .name = "q40", 63 .name = "q40",
62 .lock = __SPIN_LOCK_UNLOCKED(q40_irq_controller.lock), 64 .irq_startup = q40_irq_startup,
63 .startup = q40_irq_startup, 65 .irq_shutdown = q40_irq_shutdown,
64 .shutdown = q40_irq_shutdown, 66 .irq_enable = q40_irq_enable,
65 .enable = q40_enable_irq, 67 .irq_disable = q40_irq_disable,
66 .disable = q40_disable_irq,
67}; 68};
68 69
69/* 70/*
@@ -81,13 +82,14 @@ static int disabled;
81 82
82void __init q40_init_IRQ(void) 83void __init q40_init_IRQ(void)
83{ 84{
84 m68k_setup_irq_controller(&q40_irq_controller, 1, Q40_IRQ_MAX); 85 m68k_setup_irq_controller(&q40_irq_chip, handle_simple_irq, 1,
86 Q40_IRQ_MAX);
85 87
86 /* setup handler for ISA ints */ 88 /* setup handler for ISA ints */
87 m68k_setup_auto_interrupt(q40_irq_handler); 89 m68k_setup_auto_interrupt(q40_irq_handler);
88 90
89 m68k_irq_startup(IRQ_AUTO_2); 91 m68k_irq_startup_irq(IRQ_AUTO_2);
90 m68k_irq_startup(IRQ_AUTO_4); 92 m68k_irq_startup_irq(IRQ_AUTO_4);
91 93
92 /* now enable some ints.. */ 94 /* now enable some ints.. */
93 master_outb(1, EXT_ENABLE_REG); /* ISA IRQ 5-15 */ 95 master_outb(1, EXT_ENABLE_REG); /* ISA IRQ 5-15 */
@@ -218,11 +220,11 @@ static void q40_irq_handler(unsigned int irq, struct pt_regs *fp)
218 switch (irq) { 220 switch (irq) {
219 case 4: 221 case 4:
220 case 6: 222 case 6:
221 __m68k_handle_int(Q40_IRQ_SAMPLE, fp); 223 do_IRQ(Q40_IRQ_SAMPLE, fp);
222 return; 224 return;
223 } 225 }
224 if (mir & Q40_IRQ_FRAME_MASK) { 226 if (mir & Q40_IRQ_FRAME_MASK) {
225 __m68k_handle_int(Q40_IRQ_FRAME, fp); 227 do_IRQ(Q40_IRQ_FRAME, fp);
226 master_outb(-1, FRAME_CLEAR_REG); 228 master_outb(-1, FRAME_CLEAR_REG);
227 } 229 }
228 if ((mir & Q40_IRQ_SER_MASK) || (mir & Q40_IRQ_EXT_MASK)) { 230 if ((mir & Q40_IRQ_SER_MASK) || (mir & Q40_IRQ_EXT_MASK)) {
@@ -257,7 +259,7 @@ static void q40_irq_handler(unsigned int irq, struct pt_regs *fp)
257 goto iirq; 259 goto iirq;
258 } 260 }
259 q40_state[irq] |= IRQ_INPROGRESS; 261 q40_state[irq] |= IRQ_INPROGRESS;
260 __m68k_handle_int(irq, fp); 262 do_IRQ(irq, fp);
261 q40_state[irq] &= ~IRQ_INPROGRESS; 263 q40_state[irq] &= ~IRQ_INPROGRESS;
262 264
263 /* naively enable everything, if that fails than */ 265 /* naively enable everything, if that fails than */
@@ -288,25 +290,29 @@ static void q40_irq_handler(unsigned int irq, struct pt_regs *fp)
288 mir = master_inb(IIRQ_REG); 290 mir = master_inb(IIRQ_REG);
289 /* should test whether keyboard irq is really enabled, doing it in defhand */ 291 /* should test whether keyboard irq is really enabled, doing it in defhand */
290 if (mir & Q40_IRQ_KEYB_MASK) 292 if (mir & Q40_IRQ_KEYB_MASK)
291 __m68k_handle_int(Q40_IRQ_KEYBOARD, fp); 293 do_IRQ(Q40_IRQ_KEYBOARD, fp);
292 294
293 return; 295 return;
294} 296}
295 297
296void q40_enable_irq(unsigned int irq) 298void q40_irq_enable(struct irq_data *data)
297{ 299{
300 unsigned int irq = data->irq;
301
298 if (irq >= 5 && irq <= 15) { 302 if (irq >= 5 && irq <= 15) {
299 mext_disabled--; 303 mext_disabled--;
300 if (mext_disabled > 0) 304 if (mext_disabled > 0)
301 printk("q40_enable_irq : nested disable/enable\n"); 305 printk("q40_irq_enable : nested disable/enable\n");
302 if (mext_disabled == 0) 306 if (mext_disabled == 0)
303 master_outb(1, EXT_ENABLE_REG); 307 master_outb(1, EXT_ENABLE_REG);
304 } 308 }
305} 309}
306 310
307 311
308void q40_disable_irq(unsigned int irq) 312void q40_irq_disable(struct irq_data *data)
309{ 313{
314 unsigned int irq = data->irq;
315
310 /* disable ISA iqs : only do something if the driver has been 316 /* disable ISA iqs : only do something if the driver has been
311 * verified to be Q40 "compatible" - right now IDE, NE2K 317 * verified to be Q40 "compatible" - right now IDE, NE2K
312 * Any driver should not attempt to sleep across disable_irq !! 318 * Any driver should not attempt to sleep across disable_irq !!
@@ -319,13 +325,3 @@ void q40_disable_irq(unsigned int irq)
319 printk("disable_irq nesting count %d\n",mext_disabled); 325 printk("disable_irq nesting count %d\n",mext_disabled);
320 } 326 }
321} 327}
322
323unsigned long q40_probe_irq_on(void)
324{
325 printk("irq probing not working - reconfigure the driver to avoid this\n");
326 return -1;
327}
328int q40_probe_irq_off(unsigned long irqs)
329{
330 return -1;
331}
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index 6464ad3ae3e6..78b60f53e90a 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -51,25 +51,29 @@ void sun3_disable_irq(unsigned int irq)
51 51
52static irqreturn_t sun3_int7(int irq, void *dev_id) 52static irqreturn_t sun3_int7(int irq, void *dev_id)
53{ 53{
54 *sun3_intreg |= (1 << irq); 54 unsigned int cnt;
55 if (!(kstat_cpu(0).irqs[irq] % 2000)) 55
56 sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 16000) / 2000]); 56 cnt = kstat_irqs_cpu(irq, 0);
57 if (!(cnt % 2000))
58 sun3_leds(led_pattern[cnt % 16000 / 2000]);
57 return IRQ_HANDLED; 59 return IRQ_HANDLED;
58} 60}
59 61
60static irqreturn_t sun3_int5(int irq, void *dev_id) 62static irqreturn_t sun3_int5(int irq, void *dev_id)
61{ 63{
64 unsigned int cnt;
65
62#ifdef CONFIG_SUN3 66#ifdef CONFIG_SUN3
63 intersil_clear(); 67 intersil_clear();
64#endif 68#endif
65 *sun3_intreg |= (1 << irq);
66#ifdef CONFIG_SUN3 69#ifdef CONFIG_SUN3
67 intersil_clear(); 70 intersil_clear();
68#endif 71#endif
69 xtime_update(1); 72 xtime_update(1);
70 update_process_times(user_mode(get_irq_regs())); 73 update_process_times(user_mode(get_irq_regs()));
71 if (!(kstat_cpu(0).irqs[irq] % 20)) 74 cnt = kstat_irqs_cpu(irq, 0);
72 sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]); 75 if (!(cnt % 20))
76 sun3_leds(led_pattern[cnt % 160 / 20]);
73 return IRQ_HANDLED; 77 return IRQ_HANDLED;
74} 78}
75 79
@@ -79,29 +83,33 @@ static irqreturn_t sun3_vec255(int irq, void *dev_id)
79 return IRQ_HANDLED; 83 return IRQ_HANDLED;
80} 84}
81 85
82static void sun3_inthandle(unsigned int irq, struct pt_regs *fp) 86static void sun3_irq_enable(struct irq_data *data)
83{ 87{
84 *sun3_intreg &= ~(1 << irq); 88 sun3_enable_irq(data->irq);
89};
85 90
86 __m68k_handle_int(irq, fp); 91static void sun3_irq_disable(struct irq_data *data)
87} 92{
93 sun3_disable_irq(data->irq);
94};
88 95
89static struct irq_controller sun3_irq_controller = { 96static struct irq_chip sun3_irq_chip = {
90 .name = "sun3", 97 .name = "sun3",
91 .lock = __SPIN_LOCK_UNLOCKED(sun3_irq_controller.lock), 98 .irq_startup = m68k_irq_startup,
92 .startup = m68k_irq_startup, 99 .irq_shutdown = m68k_irq_shutdown,
93 .shutdown = m68k_irq_shutdown, 100 .irq_enable = sun3_irq_enable,
94 .enable = sun3_enable_irq, 101 .irq_disable = sun3_irq_disable,
95 .disable = sun3_disable_irq, 102 .irq_mask = sun3_irq_disable,
103 .irq_unmask = sun3_irq_enable,
96}; 104};
97 105
98void __init sun3_init_IRQ(void) 106void __init sun3_init_IRQ(void)
99{ 107{
100 *sun3_intreg = 1; 108 *sun3_intreg = 1;
101 109
102 m68k_setup_auto_interrupt(sun3_inthandle); 110 m68k_setup_irq_controller(&sun3_irq_chip, handle_level_irq, IRQ_AUTO_1,
103 m68k_setup_irq_controller(&sun3_irq_controller, IRQ_AUTO_1, 7); 111 7);
104 m68k_setup_user_interrupt(VEC_USER, 128, NULL); 112 m68k_setup_user_interrupt(VEC_USER, 128);
105 113
106 if (request_irq(IRQ_AUTO_5, sun3_int5, 0, "int5", NULL)) 114 if (request_irq(IRQ_AUTO_5, sun3_int5, 0, "int5", NULL))
107 pr_err("Couldn't register %s interrupt\n", "int5"); 115 pr_err("Couldn't register %s interrupt\n", "int5");
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 9b4cb00407d7..0be318609fc6 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -286,11 +286,11 @@ CLEAN_FILES += vmlinux.32 vmlinux.64
286archprepare: 286archprepare:
287ifdef CONFIG_MIPS32_N32 287ifdef CONFIG_MIPS32_N32
288 @echo ' Checking missing-syscalls for N32' 288 @echo ' Checking missing-syscalls for N32'
289 $(Q)$(MAKE) $(build)=. missing-syscalls ccflags-y="-mabi=n32" 289 $(Q)$(MAKE) $(build)=. missing-syscalls missing_syscalls_flags="-mabi=n32"
290endif 290endif
291ifdef CONFIG_MIPS32_O32 291ifdef CONFIG_MIPS32_O32
292 @echo ' Checking missing-syscalls for O32' 292 @echo ' Checking missing-syscalls for O32'
293 $(Q)$(MAKE) $(build)=. missing-syscalls ccflags-y="-mabi=32" 293 $(Q)$(MAKE) $(build)=. missing-syscalls missing_syscalls_flags="-mabi=32"
294endif 294endif
295 295
296install: 296install:
diff --git a/arch/powerpc/boot/dts/charon.dts b/arch/powerpc/boot/dts/charon.dts
new file mode 100644
index 000000000000..0e00e508eaa6
--- /dev/null
+++ b/arch/powerpc/boot/dts/charon.dts
@@ -0,0 +1,236 @@
1/*
2 * charon board Device Tree Source
3 *
4 * Copyright (C) 2007 Semihalf
5 * Marian Balakowicz <m8@semihalf.com>
6 *
7 * Copyright (C) 2010 DENX Software Engineering GmbH
8 * Heiko Schocher <hs@denx.de>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16/dts-v1/;
17
18/ {
19 model = "anon,charon";
20 compatible = "anon,charon";
21 #address-cells = <1>;
22 #size-cells = <1>;
23 interrupt-parent = <&mpc5200_pic>;
24
25 cpus {
26 #address-cells = <1>;
27 #size-cells = <0>;
28
29 PowerPC,5200@0 {
30 device_type = "cpu";
31 reg = <0>;
32 d-cache-line-size = <32>;
33 i-cache-line-size = <32>;
34 d-cache-size = <0x4000>; // L1, 16K
35 i-cache-size = <0x4000>; // L1, 16K
36 timebase-frequency = <0>; // from bootloader
37 bus-frequency = <0>; // from bootloader
38 clock-frequency = <0>; // from bootloader
39 };
40 };
41
42 memory {
43 device_type = "memory";
44 reg = <0x00000000 0x08000000>; // 128MB
45 };
46
47 soc5200@f0000000 {
48 #address-cells = <1>;
49 #size-cells = <1>;
50 compatible = "fsl,mpc5200-immr";
51 ranges = <0 0xf0000000 0x0000c000>;
52 reg = <0xf0000000 0x00000100>;
53 bus-frequency = <0>; // from bootloader
54 system-frequency = <0>; // from bootloader
55
56 cdm@200 {
57 compatible = "fsl,mpc5200-cdm";
58 reg = <0x200 0x38>;
59 };
60
61 mpc5200_pic: interrupt-controller@500 {
62 // 5200 interrupts are encoded into two levels;
63 interrupt-controller;
64 #interrupt-cells = <3>;
65 compatible = "fsl,mpc5200-pic";
66 reg = <0x500 0x80>;
67 };
68
69 timer@600 { // General Purpose Timer
70 compatible = "fsl,mpc5200-gpt";
71 reg = <0x600 0x10>;
72 interrupts = <1 9 0>;
73 fsl,has-wdt;
74 };
75
76 can@900 {
77 compatible = "fsl,mpc5200-mscan";
78 interrupts = <2 17 0>;
79 reg = <0x900 0x80>;
80 };
81
82 can@980 {
83 compatible = "fsl,mpc5200-mscan";
84 interrupts = <2 18 0>;
85 reg = <0x980 0x80>;
86 };
87
88 gpio_simple: gpio@b00 {
89 compatible = "fsl,mpc5200-gpio";
90 reg = <0xb00 0x40>;
91 interrupts = <1 7 0>;
92 gpio-controller;
93 #gpio-cells = <2>;
94 };
95
96 usb@1000 {
97 compatible = "fsl,mpc5200-ohci","ohci-be";
98 reg = <0x1000 0xff>;
99 interrupts = <2 6 0>;
100 };
101
102 dma-controller@1200 {
103 device_type = "dma-controller";
104 compatible = "fsl,mpc5200-bestcomm";
105 reg = <0x1200 0x80>;
106 interrupts = <3 0 0 3 1 0 3 2 0 3 3 0
107 3 4 0 3 5 0 3 6 0 3 7 0
108 3 8 0 3 9 0 3 10 0 3 11 0
109 3 12 0 3 13 0 3 14 0 3 15 0>;
110 };
111
112 xlb@1f00 {
113 compatible = "fsl,mpc5200-xlb";
114 reg = <0x1f00 0x100>;
115 };
116
117 serial@2000 { // PSC1
118 compatible = "fsl,mpc5200-psc-uart";
119 reg = <0x2000 0x100>;
120 interrupts = <2 1 0>;
121 };
122
123 serial@2400 { // PSC3
124 compatible = "fsl,mpc5200-psc-uart";
125 reg = <0x2400 0x100>;
126 interrupts = <2 3 0>;
127 };
128
129 ethernet@3000 {
130 compatible = "fsl,mpc5200-fec";
131 reg = <0x3000 0x400>;
132 local-mac-address = [ 00 00 00 00 00 00 ];
133 interrupts = <2 5 0>;
134 fixed-link = <1 1 100 0 0>;
135 };
136
137 mdio@3000 {
138 #address-cells = <1>;
139 #size-cells = <0>;
140 compatible = "fsl,mpc5200-mdio";
141 reg = <0x3000 0x400>; // fec range, since we need to setup fec interrupts
142 interrupts = <2 5 0>; // these are for "mii command finished", not link changes & co.
143 };
144
145 ata@3a00 {
146 compatible = "fsl,mpc5200-ata";
147 reg = <0x3a00 0x100>;
148 interrupts = <2 7 0>;
149 };
150
151 i2c@3d00 {
152 #address-cells = <1>;
153 #size-cells = <0>;
154 compatible = "fsl,mpc5200-i2c","fsl-i2c";
155 reg = <0x3d00 0x40>;
156 interrupts = <2 15 0>;
157 };
158
159
160 i2c@3d40 {
161 #address-cells = <1>;
162 #size-cells = <0>;
163 compatible = "fsl,mpc5200-i2c","fsl-i2c";
164 reg = <0x3d40 0x40>;
165 interrupts = <2 16 0>;
166
167 dtt@28 {
168 compatible = "national,lm80";
169 reg = <0x28>;
170 };
171
172 rtc@68 {
173 compatible = "dallas,ds1374";
174 reg = <0x68>;
175 };
176 };
177
178 sram@8000 {
179 compatible = "fsl,mpc5200-sram";
180 reg = <0x8000 0x4000>;
181 };
182 };
183
184 localbus {
185 compatible = "fsl,mpc5200-lpb","simple-bus";
186 #address-cells = <2>;
187 #size-cells = <1>;
188 ranges = < 0 0 0xfc000000 0x02000000
189 1 0 0xe0000000 0x04000000 // CS1 range, SM501
190 3 0 0xe8000000 0x00080000>;
191
192 flash@0,0 {
193 compatible = "cfi-flash";
194 reg = <0 0 0x02000000>;
195 bank-width = <4>;
196 device-width = <2>;
197 #size-cells = <1>;
198 #address-cells = <1>;
199 };
200
201 display@1,0 {
202 compatible = "smi,sm501";
203 reg = <1 0x00000000 0x00800000
204 1 0x03e00000 0x00200000>;
205 mode = "640x480-32@60";
206 interrupts = <1 1 3>;
207 little-endian;
208 };
209
210 mram0@3,0 {
211 compatible = "mtd-ram";
212 reg = <3 0x00000 0x80000>;
213 bank-width = <1>;
214 };
215 };
216
217 pci@f0000d00 {
218 #interrupt-cells = <1>;
219 #size-cells = <2>;
220 #address-cells = <3>;
221 device_type = "pci";
222 compatible = "fsl,mpc5200-pci";
223 reg = <0xf0000d00 0x100>;
224 interrupt-map-mask = <0xf800 0 0 7>;
225 interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3
226 0xc000 0 0 2 &mpc5200_pic 0 0 3
227 0xc000 0 0 3 &mpc5200_pic 0 0 3
228 0xc000 0 0 4 &mpc5200_pic 0 0 3>;
229 clock-frequency = <0>; // From boot loader
230 interrupts = <2 8 0 2 9 0 2 10 0>;
231 bus-range = <0 0>;
232 ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000
233 0x02000000 0 0x90000000 0x90000000 0 0x10000000
234 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
235 };
236};
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index 959cd2cfc275..716a37be16e3 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -1,9 +1,10 @@
1CONFIG_EXPERIMENTAL=y 1CONFIG_EXPERIMENTAL=y
2CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
3CONFIG_SPARSE_IRQ=y
3CONFIG_LOG_BUF_SHIFT=14 4CONFIG_LOG_BUF_SHIFT=14
4CONFIG_BLK_DEV_INITRD=y 5CONFIG_BLK_DEV_INITRD=y
5# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 6# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
6CONFIG_EXPERT=y 7CONFIG_EMBEDDED=y
7# CONFIG_SYSCTL_SYSCALL is not set 8# CONFIG_SYSCTL_SYSCALL is not set
8# CONFIG_KALLSYMS is not set 9# CONFIG_KALLSYMS is not set
9# CONFIG_EPOLL is not set 10# CONFIG_EPOLL is not set
@@ -17,7 +18,6 @@ CONFIG_PPC_MPC5200_SIMPLE=y
17CONFIG_PPC_MPC5200_BUGFIX=y 18CONFIG_PPC_MPC5200_BUGFIX=y
18# CONFIG_PPC_PMAC is not set 19# CONFIG_PPC_PMAC is not set
19CONFIG_PPC_BESTCOMM=y 20CONFIG_PPC_BESTCOMM=y
20CONFIG_SPARSE_IRQ=y
21CONFIG_PM=y 21CONFIG_PM=y
22# CONFIG_PCI is not set 22# CONFIG_PCI is not set
23CONFIG_NET=y 23CONFIG_NET=y
@@ -38,17 +38,18 @@ CONFIG_MTD=y
38CONFIG_MTD_CONCAT=y 38CONFIG_MTD_CONCAT=y
39CONFIG_MTD_PARTITIONS=y 39CONFIG_MTD_PARTITIONS=y
40CONFIG_MTD_CMDLINE_PARTS=y 40CONFIG_MTD_CMDLINE_PARTS=y
41CONFIG_MTD_OF_PARTS=y
41CONFIG_MTD_CHAR=y 42CONFIG_MTD_CHAR=y
42CONFIG_MTD_BLOCK=y 43CONFIG_MTD_BLOCK=y
43CONFIG_MTD_CFI=y 44CONFIG_MTD_CFI=y
44CONFIG_MTD_CFI_AMDSTD=y 45CONFIG_MTD_CFI_AMDSTD=y
45CONFIG_MTD_ROM=y 46CONFIG_MTD_ROM=y
46CONFIG_MTD_PHYSMAP_OF=y 47CONFIG_MTD_PHYSMAP_OF=y
48CONFIG_MTD_PLATRAM=y
47CONFIG_PROC_DEVICETREE=y 49CONFIG_PROC_DEVICETREE=y
48CONFIG_BLK_DEV_LOOP=y 50CONFIG_BLK_DEV_LOOP=y
49CONFIG_BLK_DEV_RAM=y 51CONFIG_BLK_DEV_RAM=y
50CONFIG_BLK_DEV_RAM_SIZE=32768 52CONFIG_BLK_DEV_RAM_SIZE=32768
51# CONFIG_MISC_DEVICES is not set
52CONFIG_BLK_DEV_SD=y 53CONFIG_BLK_DEV_SD=y
53CONFIG_CHR_DEV_SG=y 54CONFIG_CHR_DEV_SG=y
54CONFIG_ATA=y 55CONFIG_ATA=y
@@ -56,13 +57,11 @@ CONFIG_PATA_MPC52xx=y
56CONFIG_PATA_PLATFORM=y 57CONFIG_PATA_PLATFORM=y
57CONFIG_NETDEVICES=y 58CONFIG_NETDEVICES=y
58CONFIG_LXT_PHY=y 59CONFIG_LXT_PHY=y
60CONFIG_FIXED_PHY=y
59CONFIG_NET_ETHERNET=y 61CONFIG_NET_ETHERNET=y
60CONFIG_FEC_MPC52xx=y 62CONFIG_FEC_MPC52xx=y
61# CONFIG_NETDEV_1000 is not set 63# CONFIG_NETDEV_1000 is not set
62# CONFIG_NETDEV_10000 is not set 64# CONFIG_NETDEV_10000 is not set
63# CONFIG_INPUT is not set
64# CONFIG_SERIO is not set
65# CONFIG_VT is not set
66CONFIG_SERIAL_MPC52xx=y 65CONFIG_SERIAL_MPC52xx=y
67CONFIG_SERIAL_MPC52xx_CONSOLE=y 66CONFIG_SERIAL_MPC52xx_CONSOLE=y
68CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 67CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
@@ -70,7 +69,13 @@ CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
70CONFIG_I2C=y 69CONFIG_I2C=y
71CONFIG_I2C_CHARDEV=y 70CONFIG_I2C_CHARDEV=y
72CONFIG_I2C_MPC=y 71CONFIG_I2C_MPC=y
72CONFIG_SENSORS_LM80=y
73CONFIG_WATCHDOG=y 73CONFIG_WATCHDOG=y
74CONFIG_MFD_SM501=y
75CONFIG_FB=y
76CONFIG_FB_FOREIGN_ENDIAN=y
77CONFIG_FB_SM501=y
78CONFIG_FRAMEBUFFER_CONSOLE=y
74CONFIG_USB=y 79CONFIG_USB=y
75CONFIG_USB_DEVICEFS=y 80CONFIG_USB_DEVICEFS=y
76# CONFIG_USB_DEVICE_CLASS is not set 81# CONFIG_USB_DEVICE_CLASS is not set
@@ -80,10 +85,10 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
80CONFIG_USB_STORAGE=y 85CONFIG_USB_STORAGE=y
81CONFIG_RTC_CLASS=y 86CONFIG_RTC_CLASS=y
82CONFIG_RTC_DRV_DS1307=y 87CONFIG_RTC_DRV_DS1307=y
88CONFIG_RTC_DRV_DS1374=y
83CONFIG_EXT2_FS=y 89CONFIG_EXT2_FS=y
84CONFIG_EXT3_FS=y 90CONFIG_EXT3_FS=y
85# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 91# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
86CONFIG_INOTIFY=y
87CONFIG_MSDOS_FS=y 92CONFIG_MSDOS_FS=y
88CONFIG_VFAT_FS=y 93CONFIG_VFAT_FS=y
89CONFIG_PROC_KCORE=y 94CONFIG_PROC_KCORE=y
@@ -102,7 +107,6 @@ CONFIG_DEBUG_KERNEL=y
102CONFIG_DETECT_HUNG_TASK=y 107CONFIG_DETECT_HUNG_TASK=y
103# CONFIG_DEBUG_BUGVERBOSE is not set 108# CONFIG_DEBUG_BUGVERBOSE is not set
104CONFIG_DEBUG_INFO=y 109CONFIG_DEBUG_INFO=y
105# CONFIG_RCU_CPU_STALL_DETECTOR is not set
106CONFIG_CRYPTO_ECB=y 110CONFIG_CRYPTO_ECB=y
107CONFIG_CRYPTO_PCBC=y 111CONFIG_CRYPTO_PCBC=y
108# CONFIG_CRYPTO_ANSI_CPRNG is not set 112# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 84a685a505fe..535711fcb13c 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -485,3 +485,7 @@ CONFIG_CRYPTO_TWOFISH=m
485CONFIG_CRYPTO_LZO=m 485CONFIG_CRYPTO_LZO=m
486# CONFIG_CRYPTO_ANSI_CPRNG is not set 486# CONFIG_CRYPTO_ANSI_CPRNG is not set
487# CONFIG_CRYPTO_HW is not set 487# CONFIG_CRYPTO_HW is not set
488CONFIG_VIRTUALIZATION=y
489CONFIG_KVM_BOOK3S_64=m
490CONFIG_KVM_BOOK3S_64_HV=y
491CONFIG_VHOST_NET=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 96a58b709705..a72f2415a647 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -362,3 +362,7 @@ CONFIG_CRYPTO_TWOFISH=m
362CONFIG_CRYPTO_LZO=m 362CONFIG_CRYPTO_LZO=m
363# CONFIG_CRYPTO_ANSI_CPRNG is not set 363# CONFIG_CRYPTO_ANSI_CPRNG is not set
364# CONFIG_CRYPTO_HW is not set 364# CONFIG_CRYPTO_HW is not set
365CONFIG_VIRTUALIZATION=y
366CONFIG_KVM_BOOK3S_64=m
367CONFIG_KVM_BOOK3S_64_HV=y
368CONFIG_VHOST_NET=m
diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
index 24bd34c57e9d..936a904ae78c 100644
--- a/arch/powerpc/include/asm/floppy.h
+++ b/arch/powerpc/include/asm/floppy.h
@@ -108,10 +108,10 @@ static int fd_request_irq(void)
108{ 108{
109 if (can_use_virtual_dma) 109 if (can_use_virtual_dma)
110 return request_irq(FLOPPY_IRQ, floppy_hardint, 110 return request_irq(FLOPPY_IRQ, floppy_hardint,
111 IRQF_DISABLED, "floppy", NULL); 111 0, "floppy", NULL);
112 else 112 else
113 return request_irq(FLOPPY_IRQ, floppy_interrupt, 113 return request_irq(FLOPPY_IRQ, floppy_interrupt,
114 IRQF_DISABLED, "floppy", NULL); 114 0, "floppy", NULL);
115} 115}
116 116
117static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) 117static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
diff --git a/arch/powerpc/include/asm/lv1call.h b/arch/powerpc/include/asm/lv1call.h
index 9cd5fc828a37..f77c708c67a0 100644
--- a/arch/powerpc/include/asm/lv1call.h
+++ b/arch/powerpc/include/asm/lv1call.h
@@ -316,7 +316,7 @@ LV1_CALL(gpu_context_free, 1, 0, 218 )
316LV1_CALL(gpu_context_iomap, 5, 0, 221 ) 316LV1_CALL(gpu_context_iomap, 5, 0, 221 )
317LV1_CALL(gpu_context_attribute, 6, 0, 225 ) 317LV1_CALL(gpu_context_attribute, 6, 0, 225 )
318LV1_CALL(gpu_context_intr, 1, 1, 227 ) 318LV1_CALL(gpu_context_intr, 1, 1, 227 )
319LV1_CALL(gpu_attribute, 5, 0, 228 ) 319LV1_CALL(gpu_attribute, 3, 0, 228 )
320LV1_CALL(get_rtc, 0, 2, 232 ) 320LV1_CALL(get_rtc, 0, 2, 232 )
321LV1_CALL(set_ppe_periodic_tracer_frequency, 1, 0, 240 ) 321LV1_CALL(set_ppe_periodic_tracer_frequency, 1, 0, 240 )
322LV1_CALL(start_ppe_periodic_tracer, 5, 0, 241 ) 322LV1_CALL(start_ppe_periodic_tracer, 5, 0, 241 )
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index bd6c401c0ee5..c48de98ba94e 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -15,8 +15,8 @@
15#define DEFAULT_PRIORITY 5 15#define DEFAULT_PRIORITY 5
16 16
17/* 17/*
18 * Mark IPIs as higher priority so we can take them inside interrupts that 18 * Mark IPIs as higher priority so we can take them inside interrupts
19 * arent marked IRQF_DISABLED 19 * FIXME: still true now?
20 */ 20 */
21#define IPI_PRIORITY 4 21#define IPI_PRIORITY 4
22 22
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a54d92fec612..cf9c69b9189c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -267,7 +267,7 @@ vsx_unavailable_pSeries_1:
267 267
268#ifdef CONFIG_CBE_RAS 268#ifdef CONFIG_CBE_RAS
269 STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error) 269 STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
270 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202) 270 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
271#endif /* CONFIG_CBE_RAS */ 271#endif /* CONFIG_CBE_RAS */
272 272
273 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint) 273 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
@@ -275,7 +275,7 @@ vsx_unavailable_pSeries_1:
275 275
276#ifdef CONFIG_CBE_RAS 276#ifdef CONFIG_CBE_RAS
277 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance) 277 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
278 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602) 278 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
279#endif /* CONFIG_CBE_RAS */ 279#endif /* CONFIG_CBE_RAS */
280 280
281 STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist) 281 STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
@@ -283,7 +283,7 @@ vsx_unavailable_pSeries_1:
283 283
284#ifdef CONFIG_CBE_RAS 284#ifdef CONFIG_CBE_RAS
285 STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal) 285 STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
286 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802) 286 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
287#endif /* CONFIG_CBE_RAS */ 287#endif /* CONFIG_CBE_RAS */
288 288
289 . = 0x3000 289 . = 0x3000
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 25ddbfc7dd36..6df70907d60a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -187,7 +187,7 @@ int smp_request_message_ipi(int virq, int msg)
187 return 1; 187 return 1;
188 } 188 }
189#endif 189#endif
190 err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU, 190 err = request_irq(virq, smp_ipi_action[msg], IRQF_PERCPU,
191 smp_ipi_name[msg], 0); 191 smp_ipi_name[msg], 0);
192 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n", 192 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
193 virq, smp_ipi_name[msg], err); 193 virq, smp_ipi_name[msg], err);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index f422231d9235..44d8829334ab 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1263,7 +1263,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1263 addi r6,r5,VCORE_NAPPING_THREADS 1263 addi r6,r5,VCORE_NAPPING_THREADS
126431: lwarx r4,0,r6 126431: lwarx r4,0,r6
1265 or r4,r4,r0 1265 or r4,r4,r0
1266 popcntw r7,r4 1266 PPC_POPCNTW(r7,r4)
1267 cmpw r7,r8 1267 cmpw r7,r8
1268 bge 2f 1268 bge 2f
1269 stwcx. r4,0,r6 1269 stwcx. r4,0,r6
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 16da595ff402..2dd6bdd31fe1 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -34,6 +34,7 @@
34#include <linux/suspend.h> 34#include <linux/suspend.h>
35#include <linux/memblock.h> 35#include <linux/memblock.h>
36#include <linux/hugetlb.h> 36#include <linux/hugetlb.h>
37#include <linux/slab.h>
37 38
38#include <asm/pgalloc.h> 39#include <asm/pgalloc.h>
39#include <asm/prom.h> 40#include <asm/prom.h>
@@ -555,3 +556,32 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
555 book3e_hugetlb_preload(vma->vm_mm, address, *ptep); 556 book3e_hugetlb_preload(vma->vm_mm, address, *ptep);
556#endif 557#endif
557} 558}
559
560/*
561 * System memory should not be in /proc/iomem but various tools expect it
562 * (eg kdump).
563 */
564static int add_system_ram_resources(void)
565{
566 struct memblock_region *reg;
567
568 for_each_memblock(memory, reg) {
569 struct resource *res;
570 unsigned long base = reg->base;
571 unsigned long size = reg->size;
572
573 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
574 WARN_ON(!res);
575
576 if (res) {
577 res->name = "System RAM";
578 res->start = base;
579 res->end = base + size - 1;
580 res->flags = IORESOURCE_MEM;
581 WARN_ON(request_resource(&iomem_resource, res) < 0);
582 }
583 }
584
585 return 0;
586}
587subsys_initcall(add_system_ram_resources);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index c7dd4dec4df8..b22a83a91cb8 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -315,7 +315,10 @@ static int __init find_min_common_depth(void)
315 struct device_node *root; 315 struct device_node *root;
316 const char *vec5; 316 const char *vec5;
317 317
318 root = of_find_node_by_path("/rtas"); 318 if (firmware_has_feature(FW_FEATURE_OPAL))
319 root = of_find_node_by_path("/ibm,opal");
320 else
321 root = of_find_node_by_path("/rtas");
319 if (!root) 322 if (!root)
320 root = of_find_node_by_path("/"); 323 root = of_find_node_by_path("/");
321 324
@@ -344,12 +347,19 @@ static int __init find_min_common_depth(void)
344 347
345#define VEC5_AFFINITY_BYTE 5 348#define VEC5_AFFINITY_BYTE 5
346#define VEC5_AFFINITY 0x80 349#define VEC5_AFFINITY 0x80
347 chosen = of_find_node_by_path("/chosen"); 350
348 if (chosen) { 351 if (firmware_has_feature(FW_FEATURE_OPAL))
349 vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL); 352 form1_affinity = 1;
350 if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) { 353 else {
351 dbg("Using form 1 affinity\n"); 354 chosen = of_find_node_by_path("/chosen");
352 form1_affinity = 1; 355 if (chosen) {
356 vec5 = of_get_property(chosen,
357 "ibm,architecture-vec-5", NULL);
358 if (vec5 && (vec5[VEC5_AFFINITY_BYTE] &
359 VEC5_AFFINITY)) {
360 dbg("Using form 1 affinity\n");
361 form1_affinity = 1;
362 }
353 } 363 }
354 } 364 }
355 365
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index e36d6e232ae6..846b789fb195 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -50,6 +50,7 @@ static void __init mpc5200_simple_setup_arch(void)
50 50
51/* list of the supported boards */ 51/* list of the supported boards */
52static const char *board[] __initdata = { 52static const char *board[] __initdata = {
53 "anon,charon",
53 "intercontrol,digsy-mtc", 54 "intercontrol,digsy-mtc",
54 "manroland,mucmc52", 55 "manroland,mucmc52",
55 "manroland,uc101", 56 "manroland,uc101",
diff --git a/arch/powerpc/platforms/cell/beat.c b/arch/powerpc/platforms/cell/beat.c
index 232fc384e855..852592b2b712 100644
--- a/arch/powerpc/platforms/cell/beat.c
+++ b/arch/powerpc/platforms/cell/beat.c
@@ -230,7 +230,7 @@ static int __init beat_register_event(void)
230 } 230 }
231 ev->virq = virq; 231 ev->virq = virq;
232 232
233 rc = request_irq(virq, ev->handler, IRQF_DISABLED, 233 rc = request_irq(virq, ev->handler, 0,
234 ev->typecode, NULL); 234 ev->typecode, NULL);
235 if (rc != 0) { 235 if (rc != 0) {
236 printk(KERN_ERR "Beat: failed to request virtual IRQ" 236 printk(KERN_ERR "Beat: failed to request virtual IRQ"
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index ae790ac4a589..14be2bd358b8 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -514,7 +514,7 @@ static __init int celleb_setup_pciex(struct device_node *node,
514 virq = irq_create_of_mapping(oirq.controller, oirq.specifier, 514 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
515 oirq.size); 515 oirq.size);
516 if (request_irq(virq, pciex_handle_internal_irq, 516 if (request_irq(virq, pciex_handle_internal_irq,
517 IRQF_DISABLED, "pciex", (void *)phb)) { 517 0, "pciex", (void *)phb)) {
518 pr_err("PCIEXC:Failed to request irq\n"); 518 pr_err("PCIEXC:Failed to request irq\n");
519 goto error; 519 goto error;
520 } 520 }
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index fc46fcac3921..592c3d51b817 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -412,8 +412,7 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
412 IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT)); 412 IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
413 BUG_ON(virq == NO_IRQ); 413 BUG_ON(virq == NO_IRQ);
414 414
415 ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED, 415 ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
416 iommu->name, iommu);
417 BUG_ON(ret); 416 BUG_ON(ret);
418 417
419 /* set the IOC segment table origin register (and turn on the iommu) */ 418 /* set the IOC segment table origin register (and turn on the iommu) */
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
index 1acf36010423..59c1a1694104 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -392,7 +392,7 @@ static int __init cbe_init_pm_irq(void)
392 } 392 }
393 393
394 rc = request_irq(irq, cbe_pm_irq, 394 rc = request_irq(irq, cbe_pm_irq,
395 IRQF_DISABLED, "cbe-pmu-0", NULL); 395 0, "cbe-pmu-0", NULL);
396 if (rc) { 396 if (rc) {
397 printk("ERROR: Request for irq on node %d failed\n", 397 printk("ERROR: Request for irq on node %d failed\n",
398 node); 398 node);
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 3675da73623f..e94d3ecdd8bb 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -442,8 +442,7 @@ static int spu_request_irqs(struct spu *spu)
442 snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", 442 snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
443 spu->number); 443 spu->number);
444 ret = request_irq(spu->irqs[0], spu_irq_class_0, 444 ret = request_irq(spu->irqs[0], spu_irq_class_0,
445 IRQF_DISABLED, 445 0, spu->irq_c0, spu);
446 spu->irq_c0, spu);
447 if (ret) 446 if (ret)
448 goto bail0; 447 goto bail0;
449 } 448 }
@@ -451,8 +450,7 @@ static int spu_request_irqs(struct spu *spu)
451 snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", 450 snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
452 spu->number); 451 spu->number);
453 ret = request_irq(spu->irqs[1], spu_irq_class_1, 452 ret = request_irq(spu->irqs[1], spu_irq_class_1,
454 IRQF_DISABLED, 453 0, spu->irq_c1, spu);
455 spu->irq_c1, spu);
456 if (ret) 454 if (ret)
457 goto bail1; 455 goto bail1;
458 } 456 }
@@ -460,8 +458,7 @@ static int spu_request_irqs(struct spu *spu)
460 snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", 458 snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
461 spu->number); 459 spu->number);
462 ret = request_irq(spu->irqs[2], spu_irq_class_2, 460 ret = request_irq(spu->irqs[2], spu_irq_class_2,
463 IRQF_DISABLED, 461 0, spu->irq_c2, spu);
464 spu->irq_c2, spu);
465 if (ret) 462 if (ret)
466 goto bail2; 463 goto bail2;
467 } 464 }
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index cb40e921a565..901bfbddc3dd 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -272,7 +272,6 @@ static struct irqaction xmon_action = {
272 272
273static struct irqaction gatwick_cascade_action = { 273static struct irqaction gatwick_cascade_action = {
274 .handler = gatwick_action, 274 .handler = gatwick_action,
275 .flags = IRQF_DISABLED,
276 .name = "cascade", 275 .name = "cascade",
277}; 276};
278 277
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 9a521dc8e485..9b6a820bdd7d 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -200,7 +200,7 @@ static int psurge_secondary_ipi_init(void)
200 200
201 if (psurge_secondary_virq) 201 if (psurge_secondary_virq)
202 rc = request_irq(psurge_secondary_virq, psurge_ipi_intr, 202 rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
203 IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); 203 IRQF_PERCPU, "IPI", NULL);
204 204
205 if (rc) 205 if (rc)
206 pr_err("Failed to setup secondary cpu IPI\n"); 206 pr_err("Failed to setup secondary cpu IPI\n");
@@ -408,7 +408,7 @@ static int __init smp_psurge_kick_cpu(int nr)
408 408
409static struct irqaction psurge_irqaction = { 409static struct irqaction psurge_irqaction = {
410 .handler = psurge_ipi_intr, 410 .handler = psurge_ipi_intr,
411 .flags = IRQF_DISABLED|IRQF_PERCPU, 411 .flags = IRQF_PERCPU,
412 .name = "primary IPI", 412 .name = "primary IPI",
413}; 413};
414 414
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index 6c4b5837fc8a..3f175e8aedb4 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -825,7 +825,7 @@ static int ps3_probe_thread(void *data)
825 825
826 spin_lock_init(&dev.lock); 826 spin_lock_init(&dev.lock);
827 827
828 res = request_irq(irq, ps3_notification_interrupt, IRQF_DISABLED, 828 res = request_irq(irq, ps3_notification_interrupt, 0,
829 "ps3_notification", &dev); 829 "ps3_notification", &dev);
830 if (res) { 830 if (res) {
831 pr_err("%s:%u: request_irq failed %d\n", __func__, __LINE__, 831 pr_err("%s:%u: request_irq failed %d\n", __func__, __LINE__,
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
index 5e304c292f68..ca40f6afd35d 100644
--- a/arch/powerpc/platforms/ps3/repository.c
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -184,7 +184,7 @@ int ps3_repository_read_bus_type(unsigned int bus_index,
184 enum ps3_bus_type *bus_type) 184 enum ps3_bus_type *bus_type)
185{ 185{
186 int result; 186 int result;
187 u64 v1; 187 u64 v1 = 0;
188 188
189 result = read_node(PS3_LPAR_ID_PME, 189 result = read_node(PS3_LPAR_ID_PME,
190 make_first_field("bus", bus_index), 190 make_first_field("bus", bus_index),
@@ -199,7 +199,7 @@ int ps3_repository_read_bus_num_dev(unsigned int bus_index,
199 unsigned int *num_dev) 199 unsigned int *num_dev)
200{ 200{
201 int result; 201 int result;
202 u64 v1; 202 u64 v1 = 0;
203 203
204 result = read_node(PS3_LPAR_ID_PME, 204 result = read_node(PS3_LPAR_ID_PME,
205 make_first_field("bus", bus_index), 205 make_first_field("bus", bus_index),
@@ -239,7 +239,7 @@ int ps3_repository_read_dev_type(unsigned int bus_index,
239 unsigned int dev_index, enum ps3_dev_type *dev_type) 239 unsigned int dev_index, enum ps3_dev_type *dev_type)
240{ 240{
241 int result; 241 int result;
242 u64 v1; 242 u64 v1 = 0;
243 243
244 result = read_node(PS3_LPAR_ID_PME, 244 result = read_node(PS3_LPAR_ID_PME,
245 make_first_field("bus", bus_index), 245 make_first_field("bus", bus_index),
@@ -256,8 +256,8 @@ int ps3_repository_read_dev_intr(unsigned int bus_index,
256 enum ps3_interrupt_type *intr_type, unsigned int *interrupt_id) 256 enum ps3_interrupt_type *intr_type, unsigned int *interrupt_id)
257{ 257{
258 int result; 258 int result;
259 u64 v1; 259 u64 v1 = 0;
260 u64 v2; 260 u64 v2 = 0;
261 261
262 result = read_node(PS3_LPAR_ID_PME, 262 result = read_node(PS3_LPAR_ID_PME,
263 make_first_field("bus", bus_index), 263 make_first_field("bus", bus_index),
@@ -275,7 +275,7 @@ int ps3_repository_read_dev_reg_type(unsigned int bus_index,
275 enum ps3_reg_type *reg_type) 275 enum ps3_reg_type *reg_type)
276{ 276{
277 int result; 277 int result;
278 u64 v1; 278 u64 v1 = 0;
279 279
280 result = read_node(PS3_LPAR_ID_PME, 280 result = read_node(PS3_LPAR_ID_PME,
281 make_first_field("bus", bus_index), 281 make_first_field("bus", bus_index),
@@ -615,7 +615,7 @@ int ps3_repository_read_stor_dev_num_regions(unsigned int bus_index,
615 unsigned int dev_index, unsigned int *num_regions) 615 unsigned int dev_index, unsigned int *num_regions)
616{ 616{
617 int result; 617 int result;
618 u64 v1; 618 u64 v1 = 0;
619 619
620 result = read_node(PS3_LPAR_ID_PME, 620 result = read_node(PS3_LPAR_ID_PME,
621 make_first_field("bus", bus_index), 621 make_first_field("bus", bus_index),
@@ -631,7 +631,7 @@ int ps3_repository_read_stor_dev_region_id(unsigned int bus_index,
631 unsigned int *region_id) 631 unsigned int *region_id)
632{ 632{
633 int result; 633 int result;
634 u64 v1; 634 u64 v1 = 0;
635 635
636 result = read_node(PS3_LPAR_ID_PME, 636 result = read_node(PS3_LPAR_ID_PME,
637 make_first_field("bus", bus_index), 637 make_first_field("bus", bus_index),
@@ -786,7 +786,7 @@ int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size, u64 *region_total)
786int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved) 786int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved)
787{ 787{
788 int result; 788 int result;
789 u64 v1; 789 u64 v1 = 0;
790 790
791 result = read_node(PS3_LPAR_ID_CURRENT, 791 result = read_node(PS3_LPAR_ID_CURRENT,
792 make_first_field("bi", 0), 792 make_first_field("bi", 0),
@@ -805,7 +805,7 @@ int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved)
805int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id) 805int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id)
806{ 806{
807 int result; 807 int result;
808 u64 v1; 808 u64 v1 = 0;
809 809
810 result = read_node(PS3_LPAR_ID_CURRENT, 810 result = read_node(PS3_LPAR_ID_CURRENT,
811 make_first_field("bi", 0), 811 make_first_field("bi", 0),
@@ -827,8 +827,8 @@ int ps3_repository_read_spu_resource_id(unsigned int res_index,
827 enum ps3_spu_resource_type *resource_type, unsigned int *resource_id) 827 enum ps3_spu_resource_type *resource_type, unsigned int *resource_id)
828{ 828{
829 int result; 829 int result;
830 u64 v1; 830 u64 v1 = 0;
831 u64 v2; 831 u64 v2 = 0;
832 832
833 result = read_node(PS3_LPAR_ID_CURRENT, 833 result = read_node(PS3_LPAR_ID_CURRENT,
834 make_first_field("bi", 0), 834 make_first_field("bi", 0),
@@ -854,7 +854,7 @@ static int ps3_repository_read_boot_dat_address(u64 *address)
854int ps3_repository_read_boot_dat_size(unsigned int *size) 854int ps3_repository_read_boot_dat_size(unsigned int *size)
855{ 855{
856 int result; 856 int result;
857 u64 v1; 857 u64 v1 = 0;
858 858
859 result = read_node(PS3_LPAR_ID_CURRENT, 859 result = read_node(PS3_LPAR_ID_CURRENT,
860 make_first_field("bi", 0), 860 make_first_field("bi", 0),
@@ -869,7 +869,7 @@ int ps3_repository_read_boot_dat_size(unsigned int *size)
869int ps3_repository_read_vuart_av_port(unsigned int *port) 869int ps3_repository_read_vuart_av_port(unsigned int *port)
870{ 870{
871 int result; 871 int result;
872 u64 v1; 872 u64 v1 = 0;
873 873
874 result = read_node(PS3_LPAR_ID_CURRENT, 874 result = read_node(PS3_LPAR_ID_CURRENT,
875 make_first_field("bi", 0), 875 make_first_field("bi", 0),
@@ -884,7 +884,7 @@ int ps3_repository_read_vuart_av_port(unsigned int *port)
884int ps3_repository_read_vuart_sysmgr_port(unsigned int *port) 884int ps3_repository_read_vuart_sysmgr_port(unsigned int *port)
885{ 885{
886 int result; 886 int result;
887 u64 v1; 887 u64 v1 = 0;
888 888
889 result = read_node(PS3_LPAR_ID_CURRENT, 889 result = read_node(PS3_LPAR_ID_CURRENT,
890 make_first_field("bi", 0), 890 make_first_field("bi", 0),
@@ -919,7 +919,7 @@ int ps3_repository_read_boot_dat_info(u64 *lpar_addr, unsigned int *size)
919int ps3_repository_read_num_be(unsigned int *num_be) 919int ps3_repository_read_num_be(unsigned int *num_be)
920{ 920{
921 int result; 921 int result;
922 u64 v1; 922 u64 v1 = 0;
923 923
924 result = read_node(PS3_LPAR_ID_PME, 924 result = read_node(PS3_LPAR_ID_PME,
925 make_first_field("ben", 0), 925 make_first_field("ben", 0),
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index de170fd5ba4e..22ffccd8bef5 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -23,7 +23,7 @@
23 */ 23 */
24 24
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/export.h> 26#include <linux/module.h>
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0842c6f8a3e6..8c7e8528e7c4 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -800,8 +800,6 @@ static void mpic_end_ipi(struct irq_data *d)
800 * IPIs are marked IRQ_PER_CPU. This has the side effect of 800 * IPIs are marked IRQ_PER_CPU. This has the side effect of
801 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from 801 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
802 * applying to them. We EOI them late to avoid re-entering. 802 * applying to them. We EOI them late to avoid re-entering.
803 * We mark IPI's with IRQF_DISABLED as they must run with
804 * irqs disabled.
805 */ 803 */
806 mpic_eoi(mpic); 804 mpic_eoi(mpic);
807} 805}
diff --git a/arch/powerpc/sysdev/ppc4xx_soc.c b/arch/powerpc/sysdev/ppc4xx_soc.c
index d3d6ce3c33b4..0debcc31ad70 100644
--- a/arch/powerpc/sysdev/ppc4xx_soc.c
+++ b/arch/powerpc/sysdev/ppc4xx_soc.c
@@ -115,7 +115,7 @@ static int __init ppc4xx_l2c_probe(void)
115 } 115 }
116 116
117 /* Install error handler */ 117 /* Install error handler */
118 if (request_irq(irq, l2c_error_handler, IRQF_DISABLED, "L2C", 0) < 0) { 118 if (request_irq(irq, l2c_error_handler, 0, "L2C", 0) < 0) {
119 printk(KERN_ERR "Cannot install L2C error handler" 119 printk(KERN_ERR "Cannot install L2C error handler"
120 ", cache is not enabled\n"); 120 ", cache is not enabled\n");
121 of_node_put(np); 121 of_node_put(np);
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 3d93a8ded0f8..63762c672a03 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -134,11 +134,10 @@ static void xics_request_ipi(void)
134 BUG_ON(ipi == NO_IRQ); 134 BUG_ON(ipi == NO_IRQ);
135 135
136 /* 136 /*
137 * IPIs are marked IRQF_DISABLED as they must run with irqs 137 * IPIs are marked IRQF_PERCPU. The handler was set in map.
138 * disabled, and PERCPU. The handler was set in map.
139 */ 138 */
140 BUG_ON(request_irq(ipi, icp_ops->ipi_action, 139 BUG_ON(request_irq(ipi, icp_ops->ipi_action,
141 IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL)); 140 IRQF_PERCPU, "IPI", NULL));
142} 141}
143 142
144int __init xics_smp_probe(void) 143int __init xics_smp_probe(void)
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 0dca9a5c6be6..15d970328f71 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -151,8 +151,13 @@ typedef struct page *pgtable_t;
151#endif /* !__ASSEMBLY__ */ 151#endif /* !__ASSEMBLY__ */
152 152
153#ifdef CONFIG_UNCACHED_MAPPING 153#ifdef CONFIG_UNCACHED_MAPPING
154#if defined(CONFIG_29BIT)
155#define UNCAC_ADDR(addr) P2SEGADDR(addr)
156#define CAC_ADDR(addr) P1SEGADDR(addr)
157#else
154#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start) 158#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start)
155#define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET) 159#define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET)
160#endif
156#else 161#else
157#define UNCAC_ADDR(addr) ((addr)) 162#define UNCAC_ADDR(addr) ((addr))
158#define CAC_ADDR(addr) ((addr)) 163#define CAC_ADDR(addr) ((addr))
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index 3432008d2888..152b8627a184 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -375,8 +375,10 @@
375#define __NR_syncfs 362 375#define __NR_syncfs 362
376#define __NR_sendmmsg 363 376#define __NR_sendmmsg 363
377#define __NR_setns 364 377#define __NR_setns 364
378#define __NR_process_vm_readv 365
379#define __NR_process_vm_writev 366
378 380
379#define NR_syscalls 365 381#define NR_syscalls 367
380 382
381#ifdef __KERNEL__ 383#ifdef __KERNEL__
382 384
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index ec9898665f23..c330c23db5a0 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -396,10 +396,12 @@
396#define __NR_syncfs 373 396#define __NR_syncfs 373
397#define __NR_sendmmsg 374 397#define __NR_sendmmsg 374
398#define __NR_setns 375 398#define __NR_setns 375
399#define __NR_process_vm_readv 376
400#define __NR_process_vm_writev 377
399 401
400#ifdef __KERNEL__ 402#ifdef __KERNEL__
401 403
402#define NR_syscalls 376 404#define NR_syscalls 378
403 405
404#define __ARCH_WANT_IPC_PARSE_VERSION 406#define __ARCH_WANT_IPC_PARSE_VERSION
405#define __ARCH_WANT_OLD_READDIR 407#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
index a43124e608c3..0bd744f9a3b7 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -176,10 +176,12 @@ static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
176static struct plat_sci_port scif0_platform_data = { 176static struct plat_sci_port scif0_platform_data = {
177 .mapbase = 0xfffe8000, 177 .mapbase = 0xfffe8000,
178 .flags = UPF_BOOT_AUTOCONF, 178 .flags = UPF_BOOT_AUTOCONF,
179 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 179 .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
180 SCSCR_REIE,
180 .scbrr_algo_id = SCBRR_ALGO_2, 181 .scbrr_algo_id = SCBRR_ALGO_2,
181 .type = PORT_SCIF, 182 .type = PORT_SCIF,
182 .irqs = { 192, 192, 192, 192 }, 183 .irqs = { 192, 192, 192, 192 },
184 .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
183}; 185};
184 186
185static struct platform_device scif0_device = { 187static struct platform_device scif0_device = {
@@ -193,10 +195,12 @@ static struct platform_device scif0_device = {
193static struct plat_sci_port scif1_platform_data = { 195static struct plat_sci_port scif1_platform_data = {
194 .mapbase = 0xfffe8800, 196 .mapbase = 0xfffe8800,
195 .flags = UPF_BOOT_AUTOCONF, 197 .flags = UPF_BOOT_AUTOCONF,
196 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 198 .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
199 SCSCR_REIE,
197 .scbrr_algo_id = SCBRR_ALGO_2, 200 .scbrr_algo_id = SCBRR_ALGO_2,
198 .type = PORT_SCIF, 201 .type = PORT_SCIF,
199 .irqs = { 196, 196, 196, 196 }, 202 .irqs = { 196, 196, 196, 196 },
203 .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
200}; 204};
201 205
202static struct platform_device scif1_device = { 206static struct platform_device scif1_device = {
@@ -210,10 +214,12 @@ static struct platform_device scif1_device = {
210static struct plat_sci_port scif2_platform_data = { 214static struct plat_sci_port scif2_platform_data = {
211 .mapbase = 0xfffe9000, 215 .mapbase = 0xfffe9000,
212 .flags = UPF_BOOT_AUTOCONF, 216 .flags = UPF_BOOT_AUTOCONF,
213 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 217 .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
218 SCSCR_REIE,
214 .scbrr_algo_id = SCBRR_ALGO_2, 219 .scbrr_algo_id = SCBRR_ALGO_2,
215 .type = PORT_SCIF, 220 .type = PORT_SCIF,
216 .irqs = { 200, 200, 200, 200 }, 221 .irqs = { 200, 200, 200, 200 },
222 .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
217}; 223};
218 224
219static struct platform_device scif2_device = { 225static struct platform_device scif2_device = {
@@ -227,10 +233,12 @@ static struct platform_device scif2_device = {
227static struct plat_sci_port scif3_platform_data = { 233static struct plat_sci_port scif3_platform_data = {
228 .mapbase = 0xfffe9800, 234 .mapbase = 0xfffe9800,
229 .flags = UPF_BOOT_AUTOCONF, 235 .flags = UPF_BOOT_AUTOCONF,
230 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 236 .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
237 SCSCR_REIE,
231 .scbrr_algo_id = SCBRR_ALGO_2, 238 .scbrr_algo_id = SCBRR_ALGO_2,
232 .type = PORT_SCIF, 239 .type = PORT_SCIF,
233 .irqs = { 204, 204, 204, 204 }, 240 .irqs = { 204, 204, 204, 204 },
241 .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
234}; 242};
235 243
236static struct platform_device scif3_device = { 244static struct platform_device scif3_device = {
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 7d98f909a8ac..1cc257c9b1e3 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -26,11 +26,12 @@ static unsigned long cpuidle_mode[] = {
26}; 26};
27 27
28static int cpuidle_sleep_enter(struct cpuidle_device *dev, 28static int cpuidle_sleep_enter(struct cpuidle_device *dev,
29 struct cpuidle_state *state) 29 struct cpuidle_driver *drv,
30 int index)
30{ 31{
31 unsigned long allowed_mode = arch_hwblk_sleep_mode(); 32 unsigned long allowed_mode = arch_hwblk_sleep_mode();
32 ktime_t before, after; 33 ktime_t before, after;
33 int requested_state = state - &dev->states[0]; 34 int requested_state = index;
34 int allowed_state; 35 int allowed_state;
35 int k; 36 int k;
36 37
@@ -47,11 +48,13 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev,
47 */ 48 */
48 k = min_t(int, allowed_state, requested_state); 49 k = min_t(int, allowed_state, requested_state);
49 50
50 dev->last_state = &dev->states[k];
51 before = ktime_get(); 51 before = ktime_get();
52 sh_mobile_call_standby(cpuidle_mode[k]); 52 sh_mobile_call_standby(cpuidle_mode[k]);
53 after = ktime_get(); 53 after = ktime_get();
54 return ktime_to_ns(ktime_sub(after, before)) >> 10; 54
55 dev->last_residency = (int)ktime_to_ns(ktime_sub(after, before)) >> 10;
56
57 return k;
55} 58}
56 59
57static struct cpuidle_device cpuidle_dev; 60static struct cpuidle_device cpuidle_dev;
@@ -63,19 +66,19 @@ static struct cpuidle_driver cpuidle_driver = {
63void sh_mobile_setup_cpuidle(void) 66void sh_mobile_setup_cpuidle(void)
64{ 67{
65 struct cpuidle_device *dev = &cpuidle_dev; 68 struct cpuidle_device *dev = &cpuidle_dev;
69 struct cpuidle_driver *drv = &cpuidle_driver;
66 struct cpuidle_state *state; 70 struct cpuidle_state *state;
67 int i; 71 int i;
68 72
69 cpuidle_register_driver(&cpuidle_driver);
70 73
71 for (i = 0; i < CPUIDLE_STATE_MAX; i++) { 74 for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
72 dev->states[i].name[0] = '\0'; 75 drv->states[i].name[0] = '\0';
73 dev->states[i].desc[0] = '\0'; 76 drv->states[i].desc[0] = '\0';
74 } 77 }
75 78
76 i = CPUIDLE_DRIVER_STATE_START; 79 i = CPUIDLE_DRIVER_STATE_START;
77 80
78 state = &dev->states[i++]; 81 state = &drv->states[i++];
79 snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); 82 snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
80 strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN); 83 strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
81 state->exit_latency = 1; 84 state->exit_latency = 1;
@@ -85,10 +88,10 @@ void sh_mobile_setup_cpuidle(void)
85 state->flags |= CPUIDLE_FLAG_TIME_VALID; 88 state->flags |= CPUIDLE_FLAG_TIME_VALID;
86 state->enter = cpuidle_sleep_enter; 89 state->enter = cpuidle_sleep_enter;
87 90
88 dev->safe_state = state; 91 drv->safe_state_index = i-1;
89 92
90 if (sh_mobile_sleep_supported & SUSP_SH_SF) { 93 if (sh_mobile_sleep_supported & SUSP_SH_SF) {
91 state = &dev->states[i++]; 94 state = &drv->states[i++];
92 snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); 95 snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
93 strncpy(state->desc, "SuperH Sleep Mode [SF]", 96 strncpy(state->desc, "SuperH Sleep Mode [SF]",
94 CPUIDLE_DESC_LEN); 97 CPUIDLE_DESC_LEN);
@@ -101,7 +104,7 @@ void sh_mobile_setup_cpuidle(void)
101 } 104 }
102 105
103 if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) { 106 if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
104 state = &dev->states[i++]; 107 state = &drv->states[i++];
105 snprintf(state->name, CPUIDLE_NAME_LEN, "C3"); 108 snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
106 strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", 109 strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
107 CPUIDLE_DESC_LEN); 110 CPUIDLE_DESC_LEN);
@@ -113,7 +116,10 @@ void sh_mobile_setup_cpuidle(void)
113 state->enter = cpuidle_sleep_enter; 116 state->enter = cpuidle_sleep_enter;
114 } 117 }
115 118
119 drv->state_count = i;
116 dev->state_count = i; 120 dev->state_count = i;
117 121
122 cpuidle_register_driver(&cpuidle_driver);
123
118 cpuidle_register_device(dev); 124 cpuidle_register_device(dev);
119} 125}
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 293e39c59c00..ee56a9b1a981 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -382,3 +382,5 @@ ENTRY(sys_call_table)
382 .long sys_syncfs 382 .long sys_syncfs
383 .long sys_sendmmsg 383 .long sys_sendmmsg
384 .long sys_setns 384 .long sys_setns
385 .long sys_process_vm_readv /* 365 */
386 .long sys_process_vm_writev
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index ceb34b94afa9..9af7de26fb71 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -402,3 +402,5 @@ sys_call_table:
402 .long sys_syncfs 402 .long sys_syncfs
403 .long sys_sendmmsg 403 .long sys_sendmmsg
404 .long sys_setns /* 375 */ 404 .long sys_setns /* 375 */
405 .long sys_process_vm_readv
406 .long sys_process_vm_writev
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 6260d5deeabc..c7cb0af0eb59 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -406,8 +406,10 @@
406#define __NR_syncfs 335 406#define __NR_syncfs 335
407#define __NR_sendmmsg 336 407#define __NR_sendmmsg 336
408#define __NR_setns 337 408#define __NR_setns 337
409#define __NR_process_vm_readv 338
410#define __NR_process_vm_writev 339
409 411
410#define NR_syscalls 338 412#define NR_syscalls 340
411 413
412#ifdef __32bit_syscall_numbers__ 414#ifdef __32bit_syscall_numbers__
413/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, 415/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 09d8ec454450..63402f9e9f51 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -84,4 +84,4 @@ sys_call_table:
84/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv 84/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
85/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init 85/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns 87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index edbec45d4688..db86b1a0e9a9 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -85,7 +85,7 @@ sys_call_table32:
85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv 85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init 86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
87/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime 87/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns 88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
89 89
90#endif /* CONFIG_COMPAT */ 90#endif /* CONFIG_COMPAT */
91 91
@@ -162,4 +162,4 @@ sys_call_table:
162/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv 162/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
163 .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init 163 .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
164/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 164/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
165 .word sys_syncfs, sys_sendmmsg, sys_setns 165 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index 28071bb31db7..4c61b52191eb 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -109,7 +109,7 @@ static __init void sdv_serial_fixup(void)
109} 109}
110 110
111#else 111#else
112static inline void sdv_serial_fixup(void); 112static inline void sdv_serial_fixup(void) {};
113#endif 113#endif
114 114
115static void __init sdv_arch_setup(void) 115static void __init sdv_arch_setup(void)
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 6ed7afdaf4af..541020df0da6 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -608,6 +608,7 @@ static void *msic_ocd_platform_data(void *info)
608} 608}
609 609
610static const struct devs_id __initconst device_ids[] = { 610static const struct devs_id __initconst device_ids[] = {
611 {"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
611 {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data}, 612 {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
612 {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data}, 613 {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
613 {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data}, 614 {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
diff --git a/arch/x86/platform/mrst/pmu.c b/arch/x86/platform/mrst/pmu.c
index 9281da7d91bd..c0ac06da57ac 100644
--- a/arch/x86/platform/mrst/pmu.c
+++ b/arch/x86/platform/mrst/pmu.c
@@ -70,7 +70,7 @@ static struct mrst_device mrst_devs[] = {
70/* 24 */ { 0x4110, 0 }, /* Lincroft */ 70/* 24 */ { 0x4110, 0 }, /* Lincroft */
71}; 71};
72 72
73/* n.b. We ignore PCI-id 0x815 in LSS9 b/c MeeGo has no driver for it */ 73/* n.b. We ignore PCI-id 0x815 in LSS9 b/c Linux has no driver for it */
74static u16 mrst_lss9_pci_ids[] = {0x080a, 0x0814, 0}; 74static u16 mrst_lss9_pci_ids[] = {0x080a, 0x0814, 0};
75static u16 mrst_lss10_pci_ids[] = {0x0800, 0x0801, 0x0802, 0x0803, 75static u16 mrst_lss10_pci_ids[] = {0x0800, 0x0801, 0x0802, 0x0803,
76 0x0804, 0x0805, 0x080f, 0}; 76 0x0804, 0x0805, 0x080f, 0};
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
index a8ac6f1eb66d..225bd0f0f675 100644
--- a/arch/x86/platform/mrst/vrtc.c
+++ b/arch/x86/platform/mrst/vrtc.c
@@ -76,8 +76,8 @@ unsigned long vrtc_get_time(void)
76 76
77 spin_unlock_irqrestore(&rtc_lock, flags); 77 spin_unlock_irqrestore(&rtc_lock, flags);
78 78
79 /* vRTC YEAR reg contains the offset to 1960 */ 79 /* vRTC YEAR reg contains the offset to 1972 */
80 year += 1960; 80 year += 1972;
81 81
82 printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d " 82 printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d "
83 "mon: %d year: %d\n", sec, min, hour, mday, mon, year); 83 "mon: %d year: %d\n", sec, min, hour, mday, mon, year);
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index a816f24f2d52..a0f768c1d9aa 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -383,6 +383,7 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
383 return 0; 383 return 0;
384} 384}
385 385
386#ifdef CONFIG_NET
386static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 387static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
387{ 388{
388 struct crypto_report_blkcipher rblkcipher; 389 struct crypto_report_blkcipher rblkcipher;
@@ -404,6 +405,12 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
404nla_put_failure: 405nla_put_failure:
405 return -EMSGSIZE; 406 return -EMSGSIZE;
406} 407}
408#else
409static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
410{
411 return -ENOSYS;
412}
413#endif
407 414
408static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) 415static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
409 __attribute__ ((unused)); 416 __attribute__ ((unused));
@@ -457,6 +464,7 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
457 return 0; 464 return 0;
458} 465}
459 466
467#ifdef CONFIG_NET
460static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 468static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
461{ 469{
462 struct crypto_report_blkcipher rblkcipher; 470 struct crypto_report_blkcipher rblkcipher;
@@ -478,6 +486,12 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
478nla_put_failure: 486nla_put_failure:
479 return -EMSGSIZE; 487 return -EMSGSIZE;
480} 488}
489#else
490static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
491{
492 return -ENOSYS;
493}
494#endif
481 495
482static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg) 496static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
483 __attribute__ ((unused)); 497 __attribute__ ((unused));
diff --git a/crypto/aead.c b/crypto/aead.c
index 701556ffaaef..04add3dca6fe 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -111,6 +111,7 @@ static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
111 return 0; 111 return 0;
112} 112}
113 113
114#ifdef CONFIG_NET
114static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) 115static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
115{ 116{
116 struct crypto_report_aead raead; 117 struct crypto_report_aead raead;
@@ -132,6 +133,12 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
132nla_put_failure: 133nla_put_failure:
133 return -EMSGSIZE; 134 return -EMSGSIZE;
134} 135}
136#else
137static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
138{
139 return -ENOSYS;
140}
141#endif
135 142
136static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) 143static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
137 __attribute__ ((unused)); 144 __attribute__ ((unused));
@@ -190,6 +197,7 @@ static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
190 return 0; 197 return 0;
191} 198}
192 199
200#ifdef CONFIG_NET
193static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg) 201static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
194{ 202{
195 struct crypto_report_aead raead; 203 struct crypto_report_aead raead;
@@ -210,6 +218,12 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
210nla_put_failure: 218nla_put_failure:
211 return -EMSGSIZE; 219 return -EMSGSIZE;
212} 220}
221#else
222static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
223{
224 return -ENOSYS;
225}
226#endif
213 227
214 228
215static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) 229static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
diff --git a/crypto/ahash.c b/crypto/ahash.c
index a3e6ef99394a..ac93c99cfae8 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -399,6 +399,7 @@ static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
399 return sizeof(struct crypto_shash *); 399 return sizeof(struct crypto_shash *);
400} 400}
401 401
402#ifdef CONFIG_NET
402static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) 403static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
403{ 404{
404 struct crypto_report_hash rhash; 405 struct crypto_report_hash rhash;
@@ -416,6 +417,12 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
416nla_put_failure: 417nla_put_failure:
417 return -EMSGSIZE; 418 return -EMSGSIZE;
418} 419}
420#else
421static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
422{
423 return -ENOSYS;
424}
425#endif
419 426
420static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) 427static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
421 __attribute__ ((unused)); 428 __attribute__ ((unused));
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 2572d2600136..1e61d1a888b2 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -494,6 +494,7 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
494 return crypto_init_blkcipher_ops_async(tfm); 494 return crypto_init_blkcipher_ops_async(tfm);
495} 495}
496 496
497#ifdef CONFIG_NET
497static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 498static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
498{ 499{
499 struct crypto_report_blkcipher rblkcipher; 500 struct crypto_report_blkcipher rblkcipher;
@@ -515,6 +516,12 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
515nla_put_failure: 516nla_put_failure:
516 return -EMSGSIZE; 517 return -EMSGSIZE;
517} 518}
519#else
520static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
521{
522 return -ENOSYS;
523}
524#endif
518 525
519static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) 526static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
520 __attribute__ ((unused)); 527 __attribute__ ((unused));
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 2abca780312d..0605a2bbba75 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -44,9 +44,6 @@ static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
44 44
45 down_read(&crypto_alg_sem); 45 down_read(&crypto_alg_sem);
46 46
47 if (list_empty(&crypto_alg_list))
48 return NULL;
49
50 list_for_each_entry(q, &crypto_alg_list, cra_list) { 47 list_for_each_entry(q, &crypto_alg_list, cra_list) {
51 int match = 0; 48 int match = 0;
52 49
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
index fefda78a6a2a..2e458e5482d0 100644
--- a/crypto/pcompress.c
+++ b/crypto/pcompress.c
@@ -48,6 +48,7 @@ static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
48 return 0; 48 return 0;
49} 49}
50 50
51#ifdef CONFIG_NET
51static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg) 52static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
52{ 53{
53 struct crypto_report_comp rpcomp; 54 struct crypto_report_comp rpcomp;
@@ -62,6 +63,12 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
62nla_put_failure: 63nla_put_failure:
63 return -EMSGSIZE; 64 return -EMSGSIZE;
64} 65}
66#else
67static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
68{
69 return -ENOSYS;
70}
71#endif
65 72
66static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg) 73static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
67 __attribute__ ((unused)); 74 __attribute__ ((unused));
diff --git a/crypto/rng.c b/crypto/rng.c
index feb7de00f437..64f864fa8043 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -60,6 +60,7 @@ static int crypto_init_rng_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
60 return 0; 60 return 0;
61} 61}
62 62
63#ifdef CONFIG_NET
63static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) 64static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
64{ 65{
65 struct crypto_report_rng rrng; 66 struct crypto_report_rng rrng;
@@ -76,6 +77,12 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
76nla_put_failure: 77nla_put_failure:
77 return -EMSGSIZE; 78 return -EMSGSIZE;
78} 79}
80#else
81static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
82{
83 return -ENOSYS;
84}
85#endif
79 86
80static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) 87static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
81 __attribute__ ((unused)); 88 __attribute__ ((unused));
diff --git a/crypto/shash.c b/crypto/shash.c
index ea8a9c6e21e3..9100912716ae 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -524,6 +524,7 @@ static unsigned int crypto_shash_extsize(struct crypto_alg *alg)
524 return alg->cra_ctxsize; 524 return alg->cra_ctxsize;
525} 525}
526 526
527#ifdef CONFIG_NET
527static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) 528static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
528{ 529{
529 struct crypto_report_hash rhash; 530 struct crypto_report_hash rhash;
@@ -541,6 +542,12 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
541nla_put_failure: 542nla_put_failure:
542 return -EMSGSIZE; 543 return -EMSGSIZE;
543} 544}
545#else
546static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
547{
548 return -ENOSYS;
549}
550#endif
544 551
545static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) 552static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
546 __attribute__ ((unused)); 553 __attribute__ ((unused));
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 55accb7018bb..cc70f3fdcdd1 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -269,16 +269,17 @@ acpi_status acpi_hw_clear_acpi_status(void)
269 269
270 status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, 270 status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
271 ACPI_BITMASK_ALL_FIXED_STATUS); 271 ACPI_BITMASK_ALL_FIXED_STATUS);
272 if (ACPI_FAILURE(status)) { 272
273 goto unlock_and_exit; 273 acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
274 } 274
275 if (ACPI_FAILURE(status))
276 goto exit;
275 277
276 /* Clear the GPE Bits in all GPE registers in all GPE blocks */ 278 /* Clear the GPE Bits in all GPE registers in all GPE blocks */
277 279
278 status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL); 280 status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL);
279 281
280 unlock_and_exit: 282exit:
281 acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
282 return_ACPI_STATUS(status); 283 return_ACPI_STATUS(status);
283} 284}
284 285
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index 04ae1c88c03c..cfc0cc10af39 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -76,7 +76,7 @@ static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
76{ 76{
77 struct acpi_iomap *map; 77 struct acpi_iomap *map;
78 78
79 map = __acpi_find_iomap(paddr, size); 79 map = __acpi_find_iomap(paddr, size/8);
80 if (map) 80 if (map)
81 return map->vaddr + (paddr - map->paddr); 81 return map->vaddr + (paddr - map->paddr);
82 else 82 else
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 437ddbf0c49a..9ecec98bc76e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -911,10 +911,7 @@ void __init acpi_early_init(void)
911 } 911 }
912#endif 912#endif
913 913
914 status = 914 status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE);
915 acpi_enable_subsystem(~
916 (ACPI_NO_HARDWARE_INIT |
917 ACPI_NO_ACPI_ENABLE));
918 if (ACPI_FAILURE(status)) { 915 if (ACPI_FAILURE(status)) {
919 printk(KERN_ERR PREFIX "Unable to enable ACPI\n"); 916 printk(KERN_ERR PREFIX "Unable to enable ACPI\n");
920 goto error0; 917 goto error0;
@@ -935,8 +932,7 @@ static int __init acpi_bus_init(void)
935 932
936 acpi_os_initialize1(); 933 acpi_os_initialize1();
937 934
938 status = 935 status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
939 acpi_enable_subsystem(ACPI_NO_HARDWARE_INIT | ACPI_NO_ACPI_ENABLE);
940 if (ACPI_FAILURE(status)) { 936 if (ACPI_FAILURE(status)) {
941 printk(KERN_ERR PREFIX 937 printk(KERN_ERR PREFIX
942 "Unable to start the ACPI Interpreter\n"); 938 "Unable to start the ACPI Interpreter\n");
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index a4e0f1ba6040..9d7bc9f6b6cc 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -426,7 +426,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
426 426
427 if (action == CPU_ONLINE && pr) { 427 if (action == CPU_ONLINE && pr) {
428 acpi_processor_ppc_has_changed(pr, 0); 428 acpi_processor_ppc_has_changed(pr, 0);
429 acpi_processor_cst_has_changed(pr); 429 acpi_processor_hotplug(pr);
430 acpi_processor_reevaluate_tstate(pr, action); 430 acpi_processor_reevaluate_tstate(pr, action);
431 acpi_processor_tstate_has_changed(pr); 431 acpi_processor_tstate_has_changed(pr);
432 } 432 }
@@ -503,8 +503,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
503 acpi_processor_get_throttling_info(pr); 503 acpi_processor_get_throttling_info(pr);
504 acpi_processor_get_limit_info(pr); 504 acpi_processor_get_limit_info(pr);
505 505
506 506 if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
507 if (cpuidle_get_driver() == &acpi_idle_driver)
508 acpi_processor_power_init(pr, device); 507 acpi_processor_power_init(pr, device);
509 508
510 pr->cdev = thermal_cooling_device_register("Processor", device, 509 pr->cdev = thermal_cooling_device_register("Processor", device,
@@ -800,17 +799,9 @@ static int __init acpi_processor_init(void)
800 799
801 memset(&errata, 0, sizeof(errata)); 800 memset(&errata, 0, sizeof(errata));
802 801
803 if (!cpuidle_register_driver(&acpi_idle_driver)) {
804 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
805 acpi_idle_driver.name);
806 } else {
807 printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
808 cpuidle_get_driver()->name);
809 }
810
811 result = acpi_bus_register_driver(&acpi_processor_driver); 802 result = acpi_bus_register_driver(&acpi_processor_driver);
812 if (result < 0) 803 if (result < 0)
813 goto out_cpuidle; 804 return result;
814 805
815 acpi_processor_install_hotplug_notify(); 806 acpi_processor_install_hotplug_notify();
816 807
@@ -821,11 +812,6 @@ static int __init acpi_processor_init(void)
821 acpi_processor_throttling_init(); 812 acpi_processor_throttling_init();
822 813
823 return 0; 814 return 0;
824
825out_cpuidle:
826 cpuidle_unregister_driver(&acpi_idle_driver);
827
828 return result;
829} 815}
830 816
831static void __exit acpi_processor_exit(void) 817static void __exit acpi_processor_exit(void)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 9b88f9828d8c..0e8e2de2ed3e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -224,7 +224,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
224/* 224/*
225 * Suspend / resume control 225 * Suspend / resume control
226 */ 226 */
227static int acpi_idle_suspend;
228static u32 saved_bm_rld; 227static u32 saved_bm_rld;
229 228
230static void acpi_idle_bm_rld_save(void) 229static void acpi_idle_bm_rld_save(void)
@@ -243,21 +242,13 @@ static void acpi_idle_bm_rld_restore(void)
243 242
244int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) 243int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
245{ 244{
246 if (acpi_idle_suspend == 1)
247 return 0;
248
249 acpi_idle_bm_rld_save(); 245 acpi_idle_bm_rld_save();
250 acpi_idle_suspend = 1;
251 return 0; 246 return 0;
252} 247}
253 248
254int acpi_processor_resume(struct acpi_device * device) 249int acpi_processor_resume(struct acpi_device * device)
255{ 250{
256 if (acpi_idle_suspend == 0)
257 return 0;
258
259 acpi_idle_bm_rld_restore(); 251 acpi_idle_bm_rld_restore();
260 acpi_idle_suspend = 0;
261 return 0; 252 return 0;
262} 253}
263 254
@@ -741,66 +732,65 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
741/** 732/**
742 * acpi_idle_enter_c1 - enters an ACPI C1 state-type 733 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
743 * @dev: the target CPU 734 * @dev: the target CPU
744 * @state: the state data 735 * @drv: cpuidle driver containing cpuidle state info
736 * @index: index of target state
745 * 737 *
746 * This is equivalent to the HALT instruction. 738 * This is equivalent to the HALT instruction.
747 */ 739 */
748static int acpi_idle_enter_c1(struct cpuidle_device *dev, 740static int acpi_idle_enter_c1(struct cpuidle_device *dev,
749 struct cpuidle_state *state) 741 struct cpuidle_driver *drv, int index)
750{ 742{
751 ktime_t kt1, kt2; 743 ktime_t kt1, kt2;
752 s64 idle_time; 744 s64 idle_time;
753 struct acpi_processor *pr; 745 struct acpi_processor *pr;
754 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 746 struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
747 struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
755 748
756 pr = __this_cpu_read(processors); 749 pr = __this_cpu_read(processors);
750 dev->last_residency = 0;
757 751
758 if (unlikely(!pr)) 752 if (unlikely(!pr))
759 return 0; 753 return -EINVAL;
760 754
761 local_irq_disable(); 755 local_irq_disable();
762 756
763 /* Do not access any ACPI IO ports in suspend path */
764 if (acpi_idle_suspend) {
765 local_irq_enable();
766 cpu_relax();
767 return 0;
768 }
769
770 lapic_timer_state_broadcast(pr, cx, 1); 757 lapic_timer_state_broadcast(pr, cx, 1);
771 kt1 = ktime_get_real(); 758 kt1 = ktime_get_real();
772 acpi_idle_do_entry(cx); 759 acpi_idle_do_entry(cx);
773 kt2 = ktime_get_real(); 760 kt2 = ktime_get_real();
774 idle_time = ktime_to_us(ktime_sub(kt2, kt1)); 761 idle_time = ktime_to_us(ktime_sub(kt2, kt1));
775 762
763 /* Update device last_residency*/
764 dev->last_residency = (int)idle_time;
765
776 local_irq_enable(); 766 local_irq_enable();
777 cx->usage++; 767 cx->usage++;
778 lapic_timer_state_broadcast(pr, cx, 0); 768 lapic_timer_state_broadcast(pr, cx, 0);
779 769
780 return idle_time; 770 return index;
781} 771}
782 772
783/** 773/**
784 * acpi_idle_enter_simple - enters an ACPI state without BM handling 774 * acpi_idle_enter_simple - enters an ACPI state without BM handling
785 * @dev: the target CPU 775 * @dev: the target CPU
786 * @state: the state data 776 * @drv: cpuidle driver with cpuidle state information
777 * @index: the index of suggested state
787 */ 778 */
788static int acpi_idle_enter_simple(struct cpuidle_device *dev, 779static int acpi_idle_enter_simple(struct cpuidle_device *dev,
789 struct cpuidle_state *state) 780 struct cpuidle_driver *drv, int index)
790{ 781{
791 struct acpi_processor *pr; 782 struct acpi_processor *pr;
792 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 783 struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
784 struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
793 ktime_t kt1, kt2; 785 ktime_t kt1, kt2;
794 s64 idle_time_ns; 786 s64 idle_time_ns;
795 s64 idle_time; 787 s64 idle_time;
796 788
797 pr = __this_cpu_read(processors); 789 pr = __this_cpu_read(processors);
790 dev->last_residency = 0;
798 791
799 if (unlikely(!pr)) 792 if (unlikely(!pr))
800 return 0; 793 return -EINVAL;
801
802 if (acpi_idle_suspend)
803 return(acpi_idle_enter_c1(dev, state));
804 794
805 local_irq_disable(); 795 local_irq_disable();
806 796
@@ -815,7 +805,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
815 if (unlikely(need_resched())) { 805 if (unlikely(need_resched())) {
816 current_thread_info()->status |= TS_POLLING; 806 current_thread_info()->status |= TS_POLLING;
817 local_irq_enable(); 807 local_irq_enable();
818 return 0; 808 return -EINVAL;
819 } 809 }
820 } 810 }
821 811
@@ -837,6 +827,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
837 idle_time = idle_time_ns; 827 idle_time = idle_time_ns;
838 do_div(idle_time, NSEC_PER_USEC); 828 do_div(idle_time, NSEC_PER_USEC);
839 829
830 /* Update device last_residency*/
831 dev->last_residency = (int)idle_time;
832
840 /* Tell the scheduler how much we idled: */ 833 /* Tell the scheduler how much we idled: */
841 sched_clock_idle_wakeup_event(idle_time_ns); 834 sched_clock_idle_wakeup_event(idle_time_ns);
842 835
@@ -848,7 +841,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
848 841
849 lapic_timer_state_broadcast(pr, cx, 0); 842 lapic_timer_state_broadcast(pr, cx, 0);
850 cx->time += idle_time; 843 cx->time += idle_time;
851 return idle_time; 844 return index;
852} 845}
853 846
854static int c3_cpu_count; 847static int c3_cpu_count;
@@ -857,37 +850,37 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
857/** 850/**
858 * acpi_idle_enter_bm - enters C3 with proper BM handling 851 * acpi_idle_enter_bm - enters C3 with proper BM handling
859 * @dev: the target CPU 852 * @dev: the target CPU
860 * @state: the state data 853 * @drv: cpuidle driver containing state data
854 * @index: the index of suggested state
861 * 855 *
862 * If BM is detected, the deepest non-C3 idle state is entered instead. 856 * If BM is detected, the deepest non-C3 idle state is entered instead.
863 */ 857 */
864static int acpi_idle_enter_bm(struct cpuidle_device *dev, 858static int acpi_idle_enter_bm(struct cpuidle_device *dev,
865 struct cpuidle_state *state) 859 struct cpuidle_driver *drv, int index)
866{ 860{
867 struct acpi_processor *pr; 861 struct acpi_processor *pr;
868 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 862 struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
863 struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
869 ktime_t kt1, kt2; 864 ktime_t kt1, kt2;
870 s64 idle_time_ns; 865 s64 idle_time_ns;
871 s64 idle_time; 866 s64 idle_time;
872 867
873 868
874 pr = __this_cpu_read(processors); 869 pr = __this_cpu_read(processors);
870 dev->last_residency = 0;
875 871
876 if (unlikely(!pr)) 872 if (unlikely(!pr))
877 return 0; 873 return -EINVAL;
878
879 if (acpi_idle_suspend)
880 return(acpi_idle_enter_c1(dev, state));
881 874
882 if (!cx->bm_sts_skip && acpi_idle_bm_check()) { 875 if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
883 if (dev->safe_state) { 876 if (drv->safe_state_index >= 0) {
884 dev->last_state = dev->safe_state; 877 return drv->states[drv->safe_state_index].enter(dev,
885 return dev->safe_state->enter(dev, dev->safe_state); 878 drv, drv->safe_state_index);
886 } else { 879 } else {
887 local_irq_disable(); 880 local_irq_disable();
888 acpi_safe_halt(); 881 acpi_safe_halt();
889 local_irq_enable(); 882 local_irq_enable();
890 return 0; 883 return -EINVAL;
891 } 884 }
892 } 885 }
893 886
@@ -904,7 +897,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
904 if (unlikely(need_resched())) { 897 if (unlikely(need_resched())) {
905 current_thread_info()->status |= TS_POLLING; 898 current_thread_info()->status |= TS_POLLING;
906 local_irq_enable(); 899 local_irq_enable();
907 return 0; 900 return -EINVAL;
908 } 901 }
909 } 902 }
910 903
@@ -954,6 +947,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
954 idle_time = idle_time_ns; 947 idle_time = idle_time_ns;
955 do_div(idle_time, NSEC_PER_USEC); 948 do_div(idle_time, NSEC_PER_USEC);
956 949
950 /* Update device last_residency*/
951 dev->last_residency = (int)idle_time;
952
957 /* Tell the scheduler how much we idled: */ 953 /* Tell the scheduler how much we idled: */
958 sched_clock_idle_wakeup_event(idle_time_ns); 954 sched_clock_idle_wakeup_event(idle_time_ns);
959 955
@@ -965,7 +961,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
965 961
966 lapic_timer_state_broadcast(pr, cx, 0); 962 lapic_timer_state_broadcast(pr, cx, 0);
967 cx->time += idle_time; 963 cx->time += idle_time;
968 return idle_time; 964 return index;
969} 965}
970 966
971struct cpuidle_driver acpi_idle_driver = { 967struct cpuidle_driver acpi_idle_driver = {
@@ -974,14 +970,16 @@ struct cpuidle_driver acpi_idle_driver = {
974}; 970};
975 971
976/** 972/**
977 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE 973 * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
974 * device i.e. per-cpu data
975 *
978 * @pr: the ACPI processor 976 * @pr: the ACPI processor
979 */ 977 */
980static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) 978static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
981{ 979{
982 int i, count = CPUIDLE_DRIVER_STATE_START; 980 int i, count = CPUIDLE_DRIVER_STATE_START;
983 struct acpi_processor_cx *cx; 981 struct acpi_processor_cx *cx;
984 struct cpuidle_state *state; 982 struct cpuidle_state_usage *state_usage;
985 struct cpuidle_device *dev = &pr->power.dev; 983 struct cpuidle_device *dev = &pr->power.dev;
986 984
987 if (!pr->flags.power_setup_done) 985 if (!pr->flags.power_setup_done)
@@ -992,9 +990,62 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
992 } 990 }
993 991
994 dev->cpu = pr->id; 992 dev->cpu = pr->id;
993
994 if (max_cstate == 0)
995 max_cstate = 1;
996
997 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
998 cx = &pr->power.states[i];
999 state_usage = &dev->states_usage[count];
1000
1001 if (!cx->valid)
1002 continue;
1003
1004#ifdef CONFIG_HOTPLUG_CPU
1005 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
1006 !pr->flags.has_cst &&
1007 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
1008 continue;
1009#endif
1010
1011 cpuidle_set_statedata(state_usage, cx);
1012
1013 count++;
1014 if (count == CPUIDLE_STATE_MAX)
1015 break;
1016 }
1017
1018 dev->state_count = count;
1019
1020 if (!count)
1021 return -EINVAL;
1022
1023 return 0;
1024}
1025
1026/**
1027 * acpi_processor_setup_cpuidle states- prepares and configures cpuidle
1028 * global state data i.e. idle routines
1029 *
1030 * @pr: the ACPI processor
1031 */
1032static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
1033{
1034 int i, count = CPUIDLE_DRIVER_STATE_START;
1035 struct acpi_processor_cx *cx;
1036 struct cpuidle_state *state;
1037 struct cpuidle_driver *drv = &acpi_idle_driver;
1038
1039 if (!pr->flags.power_setup_done)
1040 return -EINVAL;
1041
1042 if (pr->flags.power == 0)
1043 return -EINVAL;
1044
1045 drv->safe_state_index = -1;
995 for (i = 0; i < CPUIDLE_STATE_MAX; i++) { 1046 for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
996 dev->states[i].name[0] = '\0'; 1047 drv->states[i].name[0] = '\0';
997 dev->states[i].desc[0] = '\0'; 1048 drv->states[i].desc[0] = '\0';
998 } 1049 }
999 1050
1000 if (max_cstate == 0) 1051 if (max_cstate == 0)
@@ -1002,7 +1053,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1002 1053
1003 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 1054 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
1004 cx = &pr->power.states[i]; 1055 cx = &pr->power.states[i];
1005 state = &dev->states[count];
1006 1056
1007 if (!cx->valid) 1057 if (!cx->valid)
1008 continue; 1058 continue;
@@ -1013,8 +1063,8 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1013 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 1063 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
1014 continue; 1064 continue;
1015#endif 1065#endif
1016 cpuidle_set_statedata(state, cx);
1017 1066
1067 state = &drv->states[count];
1018 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); 1068 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
1019 strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); 1069 strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
1020 state->exit_latency = cx->latency; 1070 state->exit_latency = cx->latency;
@@ -1027,13 +1077,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1027 state->flags |= CPUIDLE_FLAG_TIME_VALID; 1077 state->flags |= CPUIDLE_FLAG_TIME_VALID;
1028 1078
1029 state->enter = acpi_idle_enter_c1; 1079 state->enter = acpi_idle_enter_c1;
1030 dev->safe_state = state; 1080 drv->safe_state_index = count;
1031 break; 1081 break;
1032 1082
1033 case ACPI_STATE_C2: 1083 case ACPI_STATE_C2:
1034 state->flags |= CPUIDLE_FLAG_TIME_VALID; 1084 state->flags |= CPUIDLE_FLAG_TIME_VALID;
1035 state->enter = acpi_idle_enter_simple; 1085 state->enter = acpi_idle_enter_simple;
1036 dev->safe_state = state; 1086 drv->safe_state_index = count;
1037 break; 1087 break;
1038 1088
1039 case ACPI_STATE_C3: 1089 case ACPI_STATE_C3:
@@ -1049,7 +1099,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1049 break; 1099 break;
1050 } 1100 }
1051 1101
1052 dev->state_count = count; 1102 drv->state_count = count;
1053 1103
1054 if (!count) 1104 if (!count)
1055 return -EINVAL; 1105 return -EINVAL;
@@ -1057,7 +1107,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1057 return 0; 1107 return 0;
1058} 1108}
1059 1109
1060int acpi_processor_cst_has_changed(struct acpi_processor *pr) 1110int acpi_processor_hotplug(struct acpi_processor *pr)
1061{ 1111{
1062 int ret = 0; 1112 int ret = 0;
1063 1113
@@ -1078,7 +1128,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1078 cpuidle_disable_device(&pr->power.dev); 1128 cpuidle_disable_device(&pr->power.dev);
1079 acpi_processor_get_power_info(pr); 1129 acpi_processor_get_power_info(pr);
1080 if (pr->flags.power) { 1130 if (pr->flags.power) {
1081 acpi_processor_setup_cpuidle(pr); 1131 acpi_processor_setup_cpuidle_cx(pr);
1082 ret = cpuidle_enable_device(&pr->power.dev); 1132 ret = cpuidle_enable_device(&pr->power.dev);
1083 } 1133 }
1084 cpuidle_resume_and_unlock(); 1134 cpuidle_resume_and_unlock();
@@ -1086,10 +1136,72 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1086 return ret; 1136 return ret;
1087} 1137}
1088 1138
1139int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1140{
1141 int cpu;
1142 struct acpi_processor *_pr;
1143
1144 if (disabled_by_idle_boot_param())
1145 return 0;
1146
1147 if (!pr)
1148 return -EINVAL;
1149
1150 if (nocst)
1151 return -ENODEV;
1152
1153 if (!pr->flags.power_setup_done)
1154 return -ENODEV;
1155
1156 /*
1157 * FIXME: Design the ACPI notification to make it once per
1158 * system instead of once per-cpu. This condition is a hack
1159 * to make the code that updates C-States be called once.
1160 */
1161
1162 if (smp_processor_id() == 0 &&
1163 cpuidle_get_driver() == &acpi_idle_driver) {
1164
1165 cpuidle_pause_and_lock();
1166 /* Protect against cpu-hotplug */
1167 get_online_cpus();
1168
1169 /* Disable all cpuidle devices */
1170 for_each_online_cpu(cpu) {
1171 _pr = per_cpu(processors, cpu);
1172 if (!_pr || !_pr->flags.power_setup_done)
1173 continue;
1174 cpuidle_disable_device(&_pr->power.dev);
1175 }
1176
1177 /* Populate Updated C-state information */
1178 acpi_processor_setup_cpuidle_states(pr);
1179
1180 /* Enable all cpuidle devices */
1181 for_each_online_cpu(cpu) {
1182 _pr = per_cpu(processors, cpu);
1183 if (!_pr || !_pr->flags.power_setup_done)
1184 continue;
1185 acpi_processor_get_power_info(_pr);
1186 if (_pr->flags.power) {
1187 acpi_processor_setup_cpuidle_cx(_pr);
1188 cpuidle_enable_device(&_pr->power.dev);
1189 }
1190 }
1191 put_online_cpus();
1192 cpuidle_resume_and_unlock();
1193 }
1194
1195 return 0;
1196}
1197
1198static int acpi_processor_registered;
1199
1089int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, 1200int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1090 struct acpi_device *device) 1201 struct acpi_device *device)
1091{ 1202{
1092 acpi_status status = 0; 1203 acpi_status status = 0;
1204 int retval;
1093 static int first_run; 1205 static int first_run;
1094 1206
1095 if (disabled_by_idle_boot_param()) 1207 if (disabled_by_idle_boot_param())
@@ -1126,9 +1238,26 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1126 * platforms that only support C1. 1238 * platforms that only support C1.
1127 */ 1239 */
1128 if (pr->flags.power) { 1240 if (pr->flags.power) {
1129 acpi_processor_setup_cpuidle(pr); 1241 /* Register acpi_idle_driver if not already registered */
1130 if (cpuidle_register_device(&pr->power.dev)) 1242 if (!acpi_processor_registered) {
1131 return -EIO; 1243 acpi_processor_setup_cpuidle_states(pr);
1244 retval = cpuidle_register_driver(&acpi_idle_driver);
1245 if (retval)
1246 return retval;
1247 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
1248 acpi_idle_driver.name);
1249 }
1250 /* Register per-cpu cpuidle_device. Cpuidle driver
1251 * must already be registered before registering device
1252 */
1253 acpi_processor_setup_cpuidle_cx(pr);
1254 retval = cpuidle_register_device(&pr->power.dev);
1255 if (retval) {
1256 if (acpi_processor_registered == 0)
1257 cpuidle_unregister_driver(&acpi_idle_driver);
1258 return retval;
1259 }
1260 acpi_processor_registered++;
1132 } 1261 }
1133 return 0; 1262 return 0;
1134} 1263}
@@ -1139,8 +1268,13 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
1139 if (disabled_by_idle_boot_param()) 1268 if (disabled_by_idle_boot_param())
1140 return 0; 1269 return 0;
1141 1270
1142 cpuidle_unregister_device(&pr->power.dev); 1271 if (pr->flags.power) {
1143 pr->flags.power_setup_done = 0; 1272 cpuidle_unregister_device(&pr->power.dev);
1273 acpi_processor_registered--;
1274 if (acpi_processor_registered == 0)
1275 cpuidle_unregister_driver(&acpi_idle_driver);
1276 }
1144 1277
1278 pr->flags.power_setup_done = 0;
1145 return 0; 1279 return 0;
1146} 1280}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 449c556274c0..8ab80bafe3f1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1062,13 +1062,12 @@ static void acpi_add_id(struct acpi_device *device, const char *dev_id)
1062 if (!id) 1062 if (!id)
1063 return; 1063 return;
1064 1064
1065 id->id = kmalloc(strlen(dev_id) + 1, GFP_KERNEL); 1065 id->id = kstrdup(dev_id, GFP_KERNEL);
1066 if (!id->id) { 1066 if (!id->id) {
1067 kfree(id); 1067 kfree(id);
1068 return; 1068 return;
1069 } 1069 }
1070 1070
1071 strcpy(id->id, dev_id);
1072 list_add_tail(&id->list, &device->pnp.ids); 1071 list_add_tail(&id->list, &device->pnp.ids);
1073} 1072}
1074 1073
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index c538d0ef10ff..9f66181c814e 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -706,11 +706,23 @@ static void __exit interrupt_stats_exit(void)
706 return; 706 return;
707} 707}
708 708
709static ssize_t
710acpi_show_profile(struct device *dev, struct device_attribute *attr,
711 char *buf)
712{
713 return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
714}
715
716static const struct device_attribute pm_profile_attr =
717 __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
718
709int __init acpi_sysfs_init(void) 719int __init acpi_sysfs_init(void)
710{ 720{
711 int result; 721 int result;
712 722
713 result = acpi_tables_sysfs_init(); 723 result = acpi_tables_sysfs_init();
714 724 if (result)
725 return result;
726 result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
715 return result; 727 return result;
716} 728}
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index fb7b90b05922..cf26222a93c5 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -390,6 +390,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
390 /* Promise */ 390 /* Promise */
391 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ 391 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
392 392
393 /* Asmedia */
394 { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1061 */
395
393 /* Generic, PCI class code for AHCI */ 396 /* Generic, PCI class code for AHCI */
394 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 397 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
395 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, 398 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 004f2ce3dc73..ec555951176e 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -65,7 +65,7 @@ static struct scsi_host_template ahci_platform_sht = {
65static int __init ahci_probe(struct platform_device *pdev) 65static int __init ahci_probe(struct platform_device *pdev)
66{ 66{
67 struct device *dev = &pdev->dev; 67 struct device *dev = &pdev->dev;
68 struct ahci_platform_data *pdata = dev->platform_data; 68 struct ahci_platform_data *pdata = dev_get_platdata(dev);
69 const struct platform_device_id *id = platform_get_device_id(pdev); 69 const struct platform_device_id *id = platform_get_device_id(pdev);
70 struct ata_port_info pi = ahci_port_info[id->driver_data]; 70 struct ata_port_info pi = ahci_port_info[id->driver_data];
71 const struct ata_port_info *ppi[] = { &pi, NULL }; 71 const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -191,7 +191,7 @@ err0:
191static int __devexit ahci_remove(struct platform_device *pdev) 191static int __devexit ahci_remove(struct platform_device *pdev)
192{ 192{
193 struct device *dev = &pdev->dev; 193 struct device *dev = &pdev->dev;
194 struct ahci_platform_data *pdata = dev->platform_data; 194 struct ahci_platform_data *pdata = dev_get_platdata(dev);
195 struct ata_host *host = dev_get_drvdata(dev); 195 struct ata_host *host = dev_get_drvdata(dev);
196 196
197 ata_host_detach(host); 197 ata_host_detach(host);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index f22957c2769a..a9b282038000 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2883,7 +2883,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2883 sata_scr_read(link, SCR_STATUS, &sstatus)) 2883 sata_scr_read(link, SCR_STATUS, &sstatus))
2884 rc = -ERESTART; 2884 rc = -ERESTART;
2885 2885
2886 if (rc == -ERESTART || try >= max_tries) { 2886 if (try >= max_tries) {
2887 /* 2887 /*
2888 * Thaw host port even if reset failed, so that the port 2888 * Thaw host port even if reset failed, so that the port
2889 * can be retried on the next phy event. This risks 2889 * can be retried on the next phy event. This risks
@@ -2909,6 +2909,16 @@ int ata_eh_reset(struct ata_link *link, int classify,
2909 ata_eh_acquire(ap); 2909 ata_eh_acquire(ap);
2910 } 2910 }
2911 2911
2912 /*
2913 * While disks spinup behind PMP, some controllers fail sending SRST.
2914 * They need to be reset - as well as the PMP - before retrying.
2915 */
2916 if (rc == -ERESTART) {
2917 if (ata_is_host_link(link))
2918 ata_eh_thaw_port(ap);
2919 goto out;
2920 }
2921
2912 if (try == max_tries - 1) { 2922 if (try == max_tries - 1) {
2913 sata_down_spd_limit(link, 0); 2923 sata_down_spd_limit(link, 0);
2914 if (slave) 2924 if (slave)
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 104462dbc524..21b80c555c60 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -389,12 +389,9 @@ static void sata_pmp_quirks(struct ata_port *ap)
389 /* link reports offline after LPM */ 389 /* link reports offline after LPM */
390 link->flags |= ATA_LFLAG_NO_LPM; 390 link->flags |= ATA_LFLAG_NO_LPM;
391 391
392 /* Class code report is unreliable and SRST 392 /* Class code report is unreliable. */
393 * times out under certain configurations.
394 */
395 if (link->pmp < 5) 393 if (link->pmp < 5)
396 link->flags |= ATA_LFLAG_NO_SRST | 394 link->flags |= ATA_LFLAG_ASSUME_ATA;
397 ATA_LFLAG_ASSUME_ATA;
398 395
399 /* port 5 is for SEMB device and it doesn't like SRST */ 396 /* port 5 is for SEMB device and it doesn't like SRST */
400 if (link->pmp == 5) 397 if (link->pmp == 5)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 72a9770ac42f..2a5412e7e9c1 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1217,6 +1217,10 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
1217 1217
1218/** 1218/**
1219 * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth 1219 * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
1220 * @ap: ATA port to which the device change the queue depth
1221 * @sdev: SCSI device to configure queue depth for
1222 * @queue_depth: new queue depth
1223 * @reason: calling context
1220 * 1224 *
1221 * libsas and libata have different approaches for associating a sdev to 1225 * libsas and libata have different approaches for associating a sdev to
1222 * its ata_port. 1226 * its ata_port.
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index a72ab0dde4e5..2a472c5bb7db 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -52,7 +52,7 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
52 } 52 }
53 53
54 ret = of_irq_to_resource(dn, 0, &irq_res); 54 ret = of_irq_to_resource(dn, 0, &irq_res);
55 if (ret == NO_IRQ) 55 if (!ret)
56 irq_res.start = irq_res.end = 0; 56 irq_res.start = irq_res.end = 0;
57 else 57 else
58 irq_res.flags = 0; 58 irq_res.flags = 0;
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 447d9c05fb5a..95ec435f0eb4 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -104,7 +104,7 @@ static const struct ata_port_info sis_port_info = {
104}; 104};
105 105
106MODULE_AUTHOR("Uwe Koziolek"); 106MODULE_AUTHOR("Uwe Koziolek");
107MODULE_DESCRIPTION("low-level driver for Silicon Integratad Systems SATA controller"); 107MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller");
108MODULE_LICENSE("GPL"); 108MODULE_LICENSE("GPL");
109MODULE_DEVICE_TABLE(pci, sis_pci_tbl); 109MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
110MODULE_VERSION(DRV_VERSION); 110MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 434a6c011675..95706fa24c73 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -669,7 +669,7 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
669 struct device_opp *dev_opp = find_device_opp(dev); 669 struct device_opp *dev_opp = find_device_opp(dev);
670 670
671 if (IS_ERR(dev_opp)) 671 if (IS_ERR(dev_opp))
672 return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */ 672 return ERR_CAST(dev_opp); /* matching type */
673 673
674 return &dev_opp->head; 674 return &dev_opp->head;
675} 675}
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index db7cb8111fbe..106beb194f3c 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -105,7 +105,7 @@ static int ath3k_load_firmware(struct usb_device *udev,
105 105
106 pipe = usb_sndctrlpipe(udev, 0); 106 pipe = usb_sndctrlpipe(udev, 0);
107 107
108 send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); 108 send_buf = kmalloc(BULK_SIZE, GFP_KERNEL);
109 if (!send_buf) { 109 if (!send_buf) {
110 BT_ERR("Can't allocate memory chunk for firmware"); 110 BT_ERR("Can't allocate memory chunk for firmware");
111 return -ENOMEM; 111 return -ENOMEM;
@@ -176,7 +176,7 @@ static int ath3k_load_fwfile(struct usb_device *udev,
176 176
177 count = firmware->size; 177 count = firmware->size;
178 178
179 send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); 179 send_buf = kmalloc(BULK_SIZE, GFP_KERNEL);
180 if (!send_buf) { 180 if (!send_buf) {
181 BT_ERR("Can't allocate memory chunk for firmware"); 181 BT_ERR("Can't allocate memory chunk for firmware");
182 return -ENOMEM; 182 return -ENOMEM;
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 8b1b643a519b..54952ab800b8 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/module.h> 25#include <linux/module.h>
26 26
27#include <linux/atomic.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
28#include <linux/init.h> 29#include <linux/init.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
@@ -65,6 +66,7 @@ struct bcm203x_data {
65 unsigned long state; 66 unsigned long state;
66 67
67 struct work_struct work; 68 struct work_struct work;
69 atomic_t shutdown;
68 70
69 struct urb *urb; 71 struct urb *urb;
70 unsigned char *buffer; 72 unsigned char *buffer;
@@ -97,6 +99,7 @@ static void bcm203x_complete(struct urb *urb)
97 99
98 data->state = BCM203X_SELECT_MEMORY; 100 data->state = BCM203X_SELECT_MEMORY;
99 101
102 /* use workqueue to have a small delay */
100 schedule_work(&data->work); 103 schedule_work(&data->work);
101 break; 104 break;
102 105
@@ -155,7 +158,10 @@ static void bcm203x_work(struct work_struct *work)
155 struct bcm203x_data *data = 158 struct bcm203x_data *data =
156 container_of(work, struct bcm203x_data, work); 159 container_of(work, struct bcm203x_data, work);
157 160
158 if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) 161 if (atomic_read(&data->shutdown))
162 return;
163
164 if (usb_submit_urb(data->urb, GFP_KERNEL) < 0)
159 BT_ERR("Can't submit URB"); 165 BT_ERR("Can't submit URB");
160} 166}
161 167
@@ -243,6 +249,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
243 249
244 usb_set_intfdata(intf, data); 250 usb_set_intfdata(intf, data);
245 251
252 /* use workqueue to have a small delay */
246 schedule_work(&data->work); 253 schedule_work(&data->work);
247 254
248 return 0; 255 return 0;
@@ -254,6 +261,9 @@ static void bcm203x_disconnect(struct usb_interface *intf)
254 261
255 BT_DBG("intf %p", intf); 262 BT_DBG("intf %p", intf);
256 263
264 atomic_inc(&data->shutdown);
265 cancel_work_sync(&data->work);
266
257 usb_kill_urb(data->urb); 267 usb_kill_urb(data->urb);
258 268
259 usb_set_intfdata(intf, NULL); 269 usb_set_intfdata(intf, NULL);
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 005919ab043c..61b591470a90 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -568,22 +568,23 @@ static int bfusb_load_firmware(struct bfusb_data *data,
568 568
569 BT_INFO("BlueFRITZ! USB loading firmware"); 569 BT_INFO("BlueFRITZ! USB loading firmware");
570 570
571 buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_KERNEL);
572 if (!buf) {
573 BT_ERR("Can't allocate memory chunk for firmware");
574 return -ENOMEM;
575 }
576
571 pipe = usb_sndctrlpipe(data->udev, 0); 577 pipe = usb_sndctrlpipe(data->udev, 0);
572 578
573 if (usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, 579 if (usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION,
574 0, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT) < 0) { 580 0, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT) < 0) {
575 BT_ERR("Can't change to loading configuration"); 581 BT_ERR("Can't change to loading configuration");
582 kfree(buf);
576 return -EBUSY; 583 return -EBUSY;
577 } 584 }
578 585
579 data->udev->toggle[0] = data->udev->toggle[1] = 0; 586 data->udev->toggle[0] = data->udev->toggle[1] = 0;
580 587
581 buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_ATOMIC);
582 if (!buf) {
583 BT_ERR("Can't allocate memory chunk for firmware");
584 return -ENOMEM;
585 }
586
587 pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep); 588 pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep);
588 589
589 while (count) { 590 while (count) {
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 66cd0b8096ca..c92424ca1a55 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1186,10 +1186,11 @@ static void gen6_cleanup(void)
1186/* Certain Gen5 chipsets require require idling the GPU before 1186/* Certain Gen5 chipsets require require idling the GPU before
1187 * unmapping anything from the GTT when VT-d is enabled. 1187 * unmapping anything from the GTT when VT-d is enabled.
1188 */ 1188 */
1189extern int intel_iommu_gfx_mapped;
1190static inline int needs_idle_maps(void) 1189static inline int needs_idle_maps(void)
1191{ 1190{
1191#ifdef CONFIG_INTEL_IOMMU
1192 const unsigned short gpu_devid = intel_private.pcidev->device; 1192 const unsigned short gpu_devid = intel_private.pcidev->device;
1193 extern int intel_iommu_gfx_mapped;
1193 1194
1194 /* Query intel_iommu to see if we need the workaround. Presumably that 1195 /* Query intel_iommu to see if we need the workaround. Presumably that
1195 * was loaded first. 1196 * was loaded first.
@@ -1198,7 +1199,7 @@ static inline int needs_idle_maps(void)
1198 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && 1199 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
1199 intel_iommu_gfx_mapped) 1200 intel_iommu_gfx_mapped)
1200 return 1; 1201 return 1;
1201 1202#endif
1202 return 0; 1203 return 0;
1203} 1204}
1204 1205
@@ -1236,7 +1237,7 @@ static int i9xx_setup(void)
1236 intel_private.gtt_bus_addr = reg_addr + gtt_offset; 1237 intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1237 } 1238 }
1238 1239
1239 if (needs_idle_maps()); 1240 if (needs_idle_maps())
1240 intel_private.base.do_idle_maps = 1; 1241 intel_private.base.do_idle_maps = 1;
1241 1242
1242 intel_i9xx_setup_flush(); 1243 intel_i9xx_setup_flush();
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
index edaa987621ea..f5002015d82e 100644
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ b/drivers/cpufreq/db8500-cpufreq.c
@@ -109,7 +109,7 @@ static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
109 109
110static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy) 110static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
111{ 111{
112 int res; 112 int i, res;
113 113
114 BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table)); 114 BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table));
115 115
@@ -120,8 +120,8 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
120 freq_table[3].frequency = 1000000; 120 freq_table[3].frequency = 1000000;
121 } 121 }
122 pr_info("db8500-cpufreq : Available frequencies:\n"); 122 pr_info("db8500-cpufreq : Available frequencies:\n");
123 while (freq_table[i].frequency != CPUFREQ_TABLE_END) 123 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
124 pr_info(" %d Mhz\n", freq_table[i++].frequency/1000); 124 pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
125 125
126 /* get policy fields based on the table */ 126 /* get policy fields based on the table */
127 res = cpufreq_frequency_table_cpuinfo(policy, freq_table); 127 res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index becd6d99203b..06ce2680d00d 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -62,8 +62,9 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
62int cpuidle_idle_call(void) 62int cpuidle_idle_call(void)
63{ 63{
64 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); 64 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
65 struct cpuidle_driver *drv = cpuidle_get_driver();
65 struct cpuidle_state *target_state; 66 struct cpuidle_state *target_state;
66 int next_state; 67 int next_state, entered_state;
67 68
68 if (off) 69 if (off)
69 return -ENODEV; 70 return -ENODEV;
@@ -84,45 +85,36 @@ int cpuidle_idle_call(void)
84 hrtimer_peek_ahead_timers(); 85 hrtimer_peek_ahead_timers();
85#endif 86#endif
86 87
87 /*
88 * Call the device's prepare function before calling the
89 * governor's select function. ->prepare gives the device's
90 * cpuidle driver a chance to update any dynamic information
91 * of its cpuidle states for the current idle period, e.g.
92 * state availability, latencies, residencies, etc.
93 */
94 if (dev->prepare)
95 dev->prepare(dev);
96
97 /* ask the governor for the next state */ 88 /* ask the governor for the next state */
98 next_state = cpuidle_curr_governor->select(dev); 89 next_state = cpuidle_curr_governor->select(drv, dev);
99 if (need_resched()) { 90 if (need_resched()) {
100 local_irq_enable(); 91 local_irq_enable();
101 return 0; 92 return 0;
102 } 93 }
103 94
104 target_state = &dev->states[next_state]; 95 target_state = &drv->states[next_state];
105
106 /* enter the state and update stats */
107 dev->last_state = target_state;
108 96
109 trace_power_start(POWER_CSTATE, next_state, dev->cpu); 97 trace_power_start(POWER_CSTATE, next_state, dev->cpu);
110 trace_cpu_idle(next_state, dev->cpu); 98 trace_cpu_idle(next_state, dev->cpu);
111 99
112 dev->last_residency = target_state->enter(dev, target_state); 100 entered_state = target_state->enter(dev, drv, next_state);
113 101
114 trace_power_end(dev->cpu); 102 trace_power_end(dev->cpu);
115 trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); 103 trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
116 104
117 if (dev->last_state) 105 if (entered_state >= 0) {
118 target_state = dev->last_state; 106 /* Update cpuidle counters */
119 107 /* This can be moved to within driver enter routine
120 target_state->time += (unsigned long long)dev->last_residency; 108 * but that results in multiple copies of same code.
121 target_state->usage++; 109 */
110 dev->states_usage[entered_state].time +=
111 (unsigned long long)dev->last_residency;
112 dev->states_usage[entered_state].usage++;
113 }
122 114
123 /* give the governor an opportunity to reflect on the outcome */ 115 /* give the governor an opportunity to reflect on the outcome */
124 if (cpuidle_curr_governor->reflect) 116 if (cpuidle_curr_governor->reflect)
125 cpuidle_curr_governor->reflect(dev); 117 cpuidle_curr_governor->reflect(dev, entered_state);
126 118
127 return 0; 119 return 0;
128} 120}
@@ -173,11 +165,11 @@ void cpuidle_resume_and_unlock(void)
173EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); 165EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
174 166
175#ifdef CONFIG_ARCH_HAS_CPU_RELAX 167#ifdef CONFIG_ARCH_HAS_CPU_RELAX
176static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st) 168static int poll_idle(struct cpuidle_device *dev,
169 struct cpuidle_driver *drv, int index)
177{ 170{
178 ktime_t t1, t2; 171 ktime_t t1, t2;
179 s64 diff; 172 s64 diff;
180 int ret;
181 173
182 t1 = ktime_get(); 174 t1 = ktime_get();
183 local_irq_enable(); 175 local_irq_enable();
@@ -189,15 +181,14 @@ static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
189 if (diff > INT_MAX) 181 if (diff > INT_MAX)
190 diff = INT_MAX; 182 diff = INT_MAX;
191 183
192 ret = (int) diff; 184 dev->last_residency = (int) diff;
193 return ret; 185
186 return index;
194} 187}
195 188
196static void poll_idle_init(struct cpuidle_device *dev) 189static void poll_idle_init(struct cpuidle_driver *drv)
197{ 190{
198 struct cpuidle_state *state = &dev->states[0]; 191 struct cpuidle_state *state = &drv->states[0];
199
200 cpuidle_set_statedata(state, NULL);
201 192
202 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); 193 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
203 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); 194 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
@@ -208,7 +199,7 @@ static void poll_idle_init(struct cpuidle_device *dev)
208 state->enter = poll_idle; 199 state->enter = poll_idle;
209} 200}
210#else 201#else
211static void poll_idle_init(struct cpuidle_device *dev) {} 202static void poll_idle_init(struct cpuidle_driver *drv) {}
212#endif /* CONFIG_ARCH_HAS_CPU_RELAX */ 203#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
213 204
214/** 205/**
@@ -235,21 +226,20 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
235 return ret; 226 return ret;
236 } 227 }
237 228
238 poll_idle_init(dev); 229 poll_idle_init(cpuidle_get_driver());
239 230
240 if ((ret = cpuidle_add_state_sysfs(dev))) 231 if ((ret = cpuidle_add_state_sysfs(dev)))
241 return ret; 232 return ret;
242 233
243 if (cpuidle_curr_governor->enable && 234 if (cpuidle_curr_governor->enable &&
244 (ret = cpuidle_curr_governor->enable(dev))) 235 (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
245 goto fail_sysfs; 236 goto fail_sysfs;
246 237
247 for (i = 0; i < dev->state_count; i++) { 238 for (i = 0; i < dev->state_count; i++) {
248 dev->states[i].usage = 0; 239 dev->states_usage[i].usage = 0;
249 dev->states[i].time = 0; 240 dev->states_usage[i].time = 0;
250 } 241 }
251 dev->last_residency = 0; 242 dev->last_residency = 0;
252 dev->last_state = NULL;
253 243
254 smp_wmb(); 244 smp_wmb();
255 245
@@ -283,7 +273,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
283 dev->enabled = 0; 273 dev->enabled = 0;
284 274
285 if (cpuidle_curr_governor->disable) 275 if (cpuidle_curr_governor->disable)
286 cpuidle_curr_governor->disable(dev); 276 cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);
287 277
288 cpuidle_remove_state_sysfs(dev); 278 cpuidle_remove_state_sysfs(dev);
289 enabled_devices--; 279 enabled_devices--;
@@ -311,26 +301,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
311 301
312 init_completion(&dev->kobj_unregister); 302 init_completion(&dev->kobj_unregister);
313 303
314 /*
315 * cpuidle driver should set the dev->power_specified bit
316 * before registering the device if the driver provides
317 * power_usage numbers.
318 *
319 * For those devices whose ->power_specified is not set,
320 * we fill in power_usage with decreasing values as the
321 * cpuidle code has an implicit assumption that state Cn
322 * uses less power than C(n-1).
323 *
324 * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
325 * an power value of -1. So we use -2, -3, etc, for other
326 * c-states.
327 */
328 if (!dev->power_specified) {
329 int i;
330 for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++)
331 dev->states[i].power_usage = -1 - i;
332 }
333
334 per_cpu(cpuidle_devices, dev->cpu) = dev; 304 per_cpu(cpuidle_devices, dev->cpu) = dev;
335 list_add(&dev->device_list, &cpuidle_detected_devices); 305 list_add(&dev->device_list, &cpuidle_detected_devices);
336 if ((ret = cpuidle_add_sysfs(sys_dev))) { 306 if ((ret = cpuidle_add_sysfs(sys_dev))) {
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 3f7e3cedd133..284d7af5a9c8 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -17,6 +17,30 @@
17static struct cpuidle_driver *cpuidle_curr_driver; 17static struct cpuidle_driver *cpuidle_curr_driver;
18DEFINE_SPINLOCK(cpuidle_driver_lock); 18DEFINE_SPINLOCK(cpuidle_driver_lock);
19 19
20static void __cpuidle_register_driver(struct cpuidle_driver *drv)
21{
22 int i;
23 /*
24 * cpuidle driver should set the drv->power_specified bit
25 * before registering if the driver provides
26 * power_usage numbers.
27 *
28 * If power_specified is not set,
29 * we fill in power_usage with decreasing values as the
30 * cpuidle code has an implicit assumption that state Cn
31 * uses less power than C(n-1).
32 *
33 * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
34 * an power value of -1. So we use -2, -3, etc, for other
35 * c-states.
36 */
37 if (!drv->power_specified) {
38 for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
39 drv->states[i].power_usage = -1 - i;
40 }
41}
42
43
20/** 44/**
21 * cpuidle_register_driver - registers a driver 45 * cpuidle_register_driver - registers a driver
22 * @drv: the driver 46 * @drv: the driver
@@ -34,6 +58,7 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
34 spin_unlock(&cpuidle_driver_lock); 58 spin_unlock(&cpuidle_driver_lock);
35 return -EBUSY; 59 return -EBUSY;
36 } 60 }
61 __cpuidle_register_driver(drv);
37 cpuidle_curr_driver = drv; 62 cpuidle_curr_driver = drv;
38 spin_unlock(&cpuidle_driver_lock); 63 spin_unlock(&cpuidle_driver_lock);
39 64
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 3b8fce20f023..b6a09ea859b1 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -60,9 +60,11 @@ static inline void ladder_do_selection(struct ladder_device *ldev,
60 60
61/** 61/**
62 * ladder_select_state - selects the next state to enter 62 * ladder_select_state - selects the next state to enter
63 * @drv: cpuidle driver
63 * @dev: the CPU 64 * @dev: the CPU
64 */ 65 */
65static int ladder_select_state(struct cpuidle_device *dev) 66static int ladder_select_state(struct cpuidle_driver *drv,
67 struct cpuidle_device *dev)
66{ 68{
67 struct ladder_device *ldev = &__get_cpu_var(ladder_devices); 69 struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
68 struct ladder_device_state *last_state; 70 struct ladder_device_state *last_state;
@@ -77,15 +79,17 @@ static int ladder_select_state(struct cpuidle_device *dev)
77 79
78 last_state = &ldev->states[last_idx]; 80 last_state = &ldev->states[last_idx];
79 81
80 if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) 82 if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) {
81 last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency; 83 last_residency = cpuidle_get_last_residency(dev) - \
84 drv->states[last_idx].exit_latency;
85 }
82 else 86 else
83 last_residency = last_state->threshold.promotion_time + 1; 87 last_residency = last_state->threshold.promotion_time + 1;
84 88
85 /* consider promotion */ 89 /* consider promotion */
86 if (last_idx < dev->state_count - 1 && 90 if (last_idx < drv->state_count - 1 &&
87 last_residency > last_state->threshold.promotion_time && 91 last_residency > last_state->threshold.promotion_time &&
88 dev->states[last_idx + 1].exit_latency <= latency_req) { 92 drv->states[last_idx + 1].exit_latency <= latency_req) {
89 last_state->stats.promotion_count++; 93 last_state->stats.promotion_count++;
90 last_state->stats.demotion_count = 0; 94 last_state->stats.demotion_count = 0;
91 if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { 95 if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -96,11 +100,11 @@ static int ladder_select_state(struct cpuidle_device *dev)
96 100
97 /* consider demotion */ 101 /* consider demotion */
98 if (last_idx > CPUIDLE_DRIVER_STATE_START && 102 if (last_idx > CPUIDLE_DRIVER_STATE_START &&
99 dev->states[last_idx].exit_latency > latency_req) { 103 drv->states[last_idx].exit_latency > latency_req) {
100 int i; 104 int i;
101 105
102 for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) { 106 for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
103 if (dev->states[i].exit_latency <= latency_req) 107 if (drv->states[i].exit_latency <= latency_req)
104 break; 108 break;
105 } 109 }
106 ladder_do_selection(ldev, last_idx, i); 110 ladder_do_selection(ldev, last_idx, i);
@@ -123,9 +127,11 @@ static int ladder_select_state(struct cpuidle_device *dev)
123 127
124/** 128/**
125 * ladder_enable_device - setup for the governor 129 * ladder_enable_device - setup for the governor
130 * @drv: cpuidle driver
126 * @dev: the CPU 131 * @dev: the CPU
127 */ 132 */
128static int ladder_enable_device(struct cpuidle_device *dev) 133static int ladder_enable_device(struct cpuidle_driver *drv,
134 struct cpuidle_device *dev)
129{ 135{
130 int i; 136 int i;
131 struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); 137 struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
@@ -134,8 +140,8 @@ static int ladder_enable_device(struct cpuidle_device *dev)
134 140
135 ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START; 141 ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
136 142
137 for (i = 0; i < dev->state_count; i++) { 143 for (i = 0; i < drv->state_count; i++) {
138 state = &dev->states[i]; 144 state = &drv->states[i];
139 lstate = &ldev->states[i]; 145 lstate = &ldev->states[i];
140 146
141 lstate->stats.promotion_count = 0; 147 lstate->stats.promotion_count = 0;
@@ -144,7 +150,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
144 lstate->threshold.promotion_count = PROMOTION_COUNT; 150 lstate->threshold.promotion_count = PROMOTION_COUNT;
145 lstate->threshold.demotion_count = DEMOTION_COUNT; 151 lstate->threshold.demotion_count = DEMOTION_COUNT;
146 152
147 if (i < dev->state_count - 1) 153 if (i < drv->state_count - 1)
148 lstate->threshold.promotion_time = state->exit_latency; 154 lstate->threshold.promotion_time = state->exit_latency;
149 if (i > 0) 155 if (i > 0)
150 lstate->threshold.demotion_time = state->exit_latency; 156 lstate->threshold.demotion_time = state->exit_latency;
@@ -153,11 +159,24 @@ static int ladder_enable_device(struct cpuidle_device *dev)
153 return 0; 159 return 0;
154} 160}
155 161
162/**
163 * ladder_reflect - update the correct last_state_idx
164 * @dev: the CPU
165 * @index: the index of actual state entered
166 */
167static void ladder_reflect(struct cpuidle_device *dev, int index)
168{
169 struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
170 if (index > 0)
171 ldev->last_state_idx = index;
172}
173
156static struct cpuidle_governor ladder_governor = { 174static struct cpuidle_governor ladder_governor = {
157 .name = "ladder", 175 .name = "ladder",
158 .rating = 10, 176 .rating = 10,
159 .enable = ladder_enable_device, 177 .enable = ladder_enable_device,
160 .select = ladder_select_state, 178 .select = ladder_select_state,
179 .reflect = ladder_reflect,
161 .owner = THIS_MODULE, 180 .owner = THIS_MODULE,
162}; 181};
163 182
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 00275244ce2f..ad0952601ae2 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -183,7 +183,7 @@ static inline int performance_multiplier(void)
183 183
184static DEFINE_PER_CPU(struct menu_device, menu_devices); 184static DEFINE_PER_CPU(struct menu_device, menu_devices);
185 185
186static void menu_update(struct cpuidle_device *dev); 186static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
187 187
188/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */ 188/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
189static u64 div_round64(u64 dividend, u32 divisor) 189static u64 div_round64(u64 dividend, u32 divisor)
@@ -229,9 +229,10 @@ static void detect_repeating_patterns(struct menu_device *data)
229 229
230/** 230/**
231 * menu_select - selects the next idle state to enter 231 * menu_select - selects the next idle state to enter
232 * @drv: cpuidle driver containing state data
232 * @dev: the CPU 233 * @dev: the CPU
233 */ 234 */
234static int menu_select(struct cpuidle_device *dev) 235static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
235{ 236{
236 struct menu_device *data = &__get_cpu_var(menu_devices); 237 struct menu_device *data = &__get_cpu_var(menu_devices);
237 int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); 238 int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
@@ -241,7 +242,7 @@ static int menu_select(struct cpuidle_device *dev)
241 struct timespec t; 242 struct timespec t;
242 243
243 if (data->needs_update) { 244 if (data->needs_update) {
244 menu_update(dev); 245 menu_update(drv, dev);
245 data->needs_update = 0; 246 data->needs_update = 0;
246 } 247 }
247 248
@@ -286,11 +287,9 @@ static int menu_select(struct cpuidle_device *dev)
286 * Find the idle state with the lowest power while satisfying 287 * Find the idle state with the lowest power while satisfying
287 * our constraints. 288 * our constraints.
288 */ 289 */
289 for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) { 290 for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
290 struct cpuidle_state *s = &dev->states[i]; 291 struct cpuidle_state *s = &drv->states[i];
291 292
292 if (s->flags & CPUIDLE_FLAG_IGNORE)
293 continue;
294 if (s->target_residency > data->predicted_us) 293 if (s->target_residency > data->predicted_us)
295 continue; 294 continue;
296 if (s->exit_latency > latency_req) 295 if (s->exit_latency > latency_req)
@@ -311,26 +310,30 @@ static int menu_select(struct cpuidle_device *dev)
311/** 310/**
312 * menu_reflect - records that data structures need update 311 * menu_reflect - records that data structures need update
313 * @dev: the CPU 312 * @dev: the CPU
313 * @index: the index of actual entered state
314 * 314 *
315 * NOTE: it's important to be fast here because this operation will add to 315 * NOTE: it's important to be fast here because this operation will add to
316 * the overall exit latency. 316 * the overall exit latency.
317 */ 317 */
318static void menu_reflect(struct cpuidle_device *dev) 318static void menu_reflect(struct cpuidle_device *dev, int index)
319{ 319{
320 struct menu_device *data = &__get_cpu_var(menu_devices); 320 struct menu_device *data = &__get_cpu_var(menu_devices);
321 data->needs_update = 1; 321 data->last_state_idx = index;
322 if (index >= 0)
323 data->needs_update = 1;
322} 324}
323 325
324/** 326/**
325 * menu_update - attempts to guess what happened after entry 327 * menu_update - attempts to guess what happened after entry
328 * @drv: cpuidle driver containing state data
326 * @dev: the CPU 329 * @dev: the CPU
327 */ 330 */
328static void menu_update(struct cpuidle_device *dev) 331static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
329{ 332{
330 struct menu_device *data = &__get_cpu_var(menu_devices); 333 struct menu_device *data = &__get_cpu_var(menu_devices);
331 int last_idx = data->last_state_idx; 334 int last_idx = data->last_state_idx;
332 unsigned int last_idle_us = cpuidle_get_last_residency(dev); 335 unsigned int last_idle_us = cpuidle_get_last_residency(dev);
333 struct cpuidle_state *target = &dev->states[last_idx]; 336 struct cpuidle_state *target = &drv->states[last_idx];
334 unsigned int measured_us; 337 unsigned int measured_us;
335 u64 new_factor; 338 u64 new_factor;
336 339
@@ -384,9 +387,11 @@ static void menu_update(struct cpuidle_device *dev)
384 387
385/** 388/**
386 * menu_enable_device - scans a CPU's states and does setup 389 * menu_enable_device - scans a CPU's states and does setup
390 * @drv: cpuidle driver
387 * @dev: the CPU 391 * @dev: the CPU
388 */ 392 */
389static int menu_enable_device(struct cpuidle_device *dev) 393static int menu_enable_device(struct cpuidle_driver *drv,
394 struct cpuidle_device *dev)
390{ 395{
391 struct menu_device *data = &per_cpu(menu_devices, dev->cpu); 396 struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
392 397
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index be7917ec40c9..1e756e160dca 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -216,7 +216,8 @@ static struct kobj_type ktype_cpuidle = {
216 216
217struct cpuidle_state_attr { 217struct cpuidle_state_attr {
218 struct attribute attr; 218 struct attribute attr;
219 ssize_t (*show)(struct cpuidle_state *, char *); 219 ssize_t (*show)(struct cpuidle_state *, \
220 struct cpuidle_state_usage *, char *);
220 ssize_t (*store)(struct cpuidle_state *, const char *, size_t); 221 ssize_t (*store)(struct cpuidle_state *, const char *, size_t);
221}; 222};
222 223
@@ -224,19 +225,22 @@ struct cpuidle_state_attr {
224static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL) 225static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
225 226
226#define define_show_state_function(_name) \ 227#define define_show_state_function(_name) \
227static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ 228static ssize_t show_state_##_name(struct cpuidle_state *state, \
229 struct cpuidle_state_usage *state_usage, char *buf) \
228{ \ 230{ \
229 return sprintf(buf, "%u\n", state->_name);\ 231 return sprintf(buf, "%u\n", state->_name);\
230} 232}
231 233
232#define define_show_state_ull_function(_name) \ 234#define define_show_state_ull_function(_name) \
233static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ 235static ssize_t show_state_##_name(struct cpuidle_state *state, \
236 struct cpuidle_state_usage *state_usage, char *buf) \
234{ \ 237{ \
235 return sprintf(buf, "%llu\n", state->_name);\ 238 return sprintf(buf, "%llu\n", state_usage->_name);\
236} 239}
237 240
238#define define_show_state_str_function(_name) \ 241#define define_show_state_str_function(_name) \
239static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ 242static ssize_t show_state_##_name(struct cpuidle_state *state, \
243 struct cpuidle_state_usage *state_usage, char *buf) \
240{ \ 244{ \
241 if (state->_name[0] == '\0')\ 245 if (state->_name[0] == '\0')\
242 return sprintf(buf, "<null>\n");\ 246 return sprintf(buf, "<null>\n");\
@@ -269,16 +273,18 @@ static struct attribute *cpuidle_state_default_attrs[] = {
269 273
270#define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) 274#define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
271#define kobj_to_state(k) (kobj_to_state_obj(k)->state) 275#define kobj_to_state(k) (kobj_to_state_obj(k)->state)
276#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
272#define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr) 277#define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
273static ssize_t cpuidle_state_show(struct kobject * kobj, 278static ssize_t cpuidle_state_show(struct kobject * kobj,
274 struct attribute * attr ,char * buf) 279 struct attribute * attr ,char * buf)
275{ 280{
276 int ret = -EIO; 281 int ret = -EIO;
277 struct cpuidle_state *state = kobj_to_state(kobj); 282 struct cpuidle_state *state = kobj_to_state(kobj);
283 struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
278 struct cpuidle_state_attr * cattr = attr_to_stateattr(attr); 284 struct cpuidle_state_attr * cattr = attr_to_stateattr(attr);
279 285
280 if (cattr->show) 286 if (cattr->show)
281 ret = cattr->show(state, buf); 287 ret = cattr->show(state, state_usage, buf);
282 288
283 return ret; 289 return ret;
284} 290}
@@ -316,13 +322,15 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device)
316{ 322{
317 int i, ret = -ENOMEM; 323 int i, ret = -ENOMEM;
318 struct cpuidle_state_kobj *kobj; 324 struct cpuidle_state_kobj *kobj;
325 struct cpuidle_driver *drv = cpuidle_get_driver();
319 326
320 /* state statistics */ 327 /* state statistics */
321 for (i = 0; i < device->state_count; i++) { 328 for (i = 0; i < device->state_count; i++) {
322 kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); 329 kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
323 if (!kobj) 330 if (!kobj)
324 goto error_state; 331 goto error_state;
325 kobj->state = &device->states[i]; 332 kobj->state = &drv->states[i];
333 kobj->state_usage = &device->states_usage[i];
326 init_completion(&kobj->kobj_unregister); 334 init_completion(&kobj->kobj_unregister);
327 335
328 ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj, 336 ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj,
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0e49d87f6c60..0b0562979171 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -148,13 +148,17 @@ static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
148 return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0; 148 return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
149} 149}
150 150
151#define MOD_REG_BIT(reg, bit_mask, set) \ 151static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
152do { \ 152{
153 int l = __raw_readl(base + reg); \ 153 int l = __raw_readl(base + reg);
154 if (set) l |= bit_mask; \ 154
155 else l &= ~bit_mask; \ 155 if (set)
156 __raw_writel(l, base + reg); \ 156 l |= mask;
157} while(0) 157 else
158 l &= ~mask;
159
160 __raw_writel(l, base + reg);
161}
158 162
159/** 163/**
160 * _set_gpio_debounce - low level gpio debounce time 164 * _set_gpio_debounce - low level gpio debounce time
@@ -210,28 +214,28 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
210 u32 gpio_bit = 1 << gpio; 214 u32 gpio_bit = 1 << gpio;
211 215
212 if (cpu_is_omap44xx()) { 216 if (cpu_is_omap44xx()) {
213 MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit, 217 _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT0, gpio_bit,
214 trigger & IRQ_TYPE_LEVEL_LOW); 218 trigger & IRQ_TYPE_LEVEL_LOW);
215 MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT1, gpio_bit, 219 _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT1, gpio_bit,
216 trigger & IRQ_TYPE_LEVEL_HIGH); 220 trigger & IRQ_TYPE_LEVEL_HIGH);
217 MOD_REG_BIT(OMAP4_GPIO_RISINGDETECT, gpio_bit, 221 _gpio_rmw(base, OMAP4_GPIO_RISINGDETECT, gpio_bit,
218 trigger & IRQ_TYPE_EDGE_RISING); 222 trigger & IRQ_TYPE_EDGE_RISING);
219 MOD_REG_BIT(OMAP4_GPIO_FALLINGDETECT, gpio_bit, 223 _gpio_rmw(base, OMAP4_GPIO_FALLINGDETECT, gpio_bit,
220 trigger & IRQ_TYPE_EDGE_FALLING); 224 trigger & IRQ_TYPE_EDGE_FALLING);
221 } else { 225 } else {
222 MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit, 226 _gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
223 trigger & IRQ_TYPE_LEVEL_LOW); 227 trigger & IRQ_TYPE_LEVEL_LOW);
224 MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit, 228 _gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
225 trigger & IRQ_TYPE_LEVEL_HIGH); 229 trigger & IRQ_TYPE_LEVEL_HIGH);
226 MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit, 230 _gpio_rmw(base, OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
227 trigger & IRQ_TYPE_EDGE_RISING); 231 trigger & IRQ_TYPE_EDGE_RISING);
228 MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit, 232 _gpio_rmw(base, OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
229 trigger & IRQ_TYPE_EDGE_FALLING); 233 trigger & IRQ_TYPE_EDGE_FALLING);
230 } 234 }
231 if (likely(!(bank->non_wakeup_gpios & gpio_bit))) { 235 if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
232 if (cpu_is_omap44xx()) { 236 if (cpu_is_omap44xx()) {
233 MOD_REG_BIT(OMAP4_GPIO_IRQWAKEN0, gpio_bit, 237 _gpio_rmw(base, OMAP4_GPIO_IRQWAKEN0, gpio_bit,
234 trigger != 0); 238 trigger != 0);
235 } else { 239 } else {
236 /* 240 /*
237 * GPIO wakeup request can only be generated on edge 241 * GPIO wakeup request can only be generated on edge
@@ -1086,6 +1090,11 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
1086 1090
1087 gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base, 1091 gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
1088 handle_simple_irq); 1092 handle_simple_irq);
1093 if (!gc) {
1094 dev_err(bank->dev, "Memory alloc failed for gc\n");
1095 return;
1096 }
1097
1089 ct = gc->chip_types; 1098 ct = gc->chip_types;
1090 1099
1091 /* NOTE: No ack required, reading IRQ status clears it. */ 1100 /* NOTE: No ack required, reading IRQ status clears it. */
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 0550dcb85814..147df8ae79db 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -596,9 +596,6 @@ static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert)
596 596
597 /* set platform specific polarity inversion */ 597 /* set platform specific polarity inversion */
598 ret = pca953x_write_reg(chip, PCA953X_INVERT, invert); 598 ret = pca953x_write_reg(chip, PCA953X_INVERT, invert);
599 if (ret)
600 goto out;
601 return 0;
602out: 599out:
603 return ret; 600 return ret;
604} 601}
@@ -640,7 +637,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
640 struct pca953x_platform_data *pdata; 637 struct pca953x_platform_data *pdata;
641 struct pca953x_chip *chip; 638 struct pca953x_chip *chip;
642 int irq_base=0, invert=0; 639 int irq_base=0, invert=0;
643 int ret = 0; 640 int ret;
644 641
645 chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL); 642 chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
646 if (chip == NULL) 643 if (chip == NULL)
@@ -673,10 +670,10 @@ static int __devinit pca953x_probe(struct i2c_client *client,
673 pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK); 670 pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);
674 671
675 if (chip->chip_type == PCA953X_TYPE) 672 if (chip->chip_type == PCA953X_TYPE)
676 device_pca953x_init(chip, invert); 673 ret = device_pca953x_init(chip, invert);
677 else if (chip->chip_type == PCA957X_TYPE)
678 device_pca957x_init(chip, invert);
679 else 674 else
675 ret = device_pca957x_init(chip, invert);
676 if (ret)
680 goto out_failed; 677 goto out_failed;
681 678
682 ret = pca953x_irq_setup(chip, id, irq_base); 679 ret = pca953x_irq_setup(chip, id, irq_base);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 785127cb281b..1368826ef284 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,7 +9,6 @@ menuconfig DRM
9 depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU 9 depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
10 select I2C 10 select I2C
11 select I2C_ALGOBIT 11 select I2C_ALGOBIT
12 select SLOW_WORK
13 help 12 help
14 Kernel-level support for the Direct Rendering Infrastructure (DRI) 13 Kernel-level support for the Direct Rendering Infrastructure (DRI)
15 introduced in XFree86 4.0. If you say Y here, you need to select 14 introduced in XFree86 4.0. If you say Y here, you need to select
@@ -96,6 +95,7 @@ config DRM_I915
96 select FB_CFB_IMAGEBLIT 95 select FB_CFB_IMAGEBLIT
97 # i915 depends on ACPI_VIDEO when ACPI is enabled 96 # i915 depends on ACPI_VIDEO when ACPI is enabled
98 # but for select to work, need to select ACPI_VIDEO's dependencies, ick 97 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
98 select BACKLIGHT_LCD_SUPPORT if ACPI
99 select BACKLIGHT_CLASS_DEVICE if ACPI 99 select BACKLIGHT_CLASS_DEVICE if ACPI
100 select VIDEO_OUTPUT_CONTROL if ACPI 100 select VIDEO_OUTPUT_CONTROL if ACPI
101 select INPUT if ACPI 101 select INPUT if ACPI
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 711d9653abd0..405c63b9d539 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -163,6 +163,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
163 { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 }, 163 { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
164 { DRM_MODE_CONNECTOR_TV, "TV", 0 }, 164 { DRM_MODE_CONNECTOR_TV, "TV", 0 },
165 { DRM_MODE_CONNECTOR_eDP, "eDP", 0 }, 165 { DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
166 { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
166}; 167};
167 168
168static struct drm_prop_enum_list drm_encoder_enum_list[] = 169static struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -171,6 +172,7 @@ static struct drm_prop_enum_list drm_encoder_enum_list[] =
171 { DRM_MODE_ENCODER_TMDS, "TMDS" }, 172 { DRM_MODE_ENCODER_TMDS, "TMDS" },
172 { DRM_MODE_ENCODER_LVDS, "LVDS" }, 173 { DRM_MODE_ENCODER_LVDS, "LVDS" },
173 { DRM_MODE_ENCODER_TVDAC, "TV" }, 174 { DRM_MODE_ENCODER_TVDAC, "TV" },
175 { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
174}; 176};
175 177
176char *drm_get_encoder_name(struct drm_encoder *encoder) 178char *drm_get_encoder_name(struct drm_encoder *encoder)
@@ -464,8 +466,10 @@ void drm_connector_init(struct drm_device *dev,
464 list_add_tail(&connector->head, &dev->mode_config.connector_list); 466 list_add_tail(&connector->head, &dev->mode_config.connector_list);
465 dev->mode_config.num_connector++; 467 dev->mode_config.num_connector++;
466 468
467 drm_connector_attach_property(connector, 469 if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
468 dev->mode_config.edid_property, 0); 470 drm_connector_attach_property(connector,
471 dev->mode_config.edid_property,
472 0);
469 473
470 drm_connector_attach_property(connector, 474 drm_connector_attach_property(connector,
471 dev->mode_config.dpms_property, 0); 475 dev->mode_config.dpms_property, 0);
@@ -2114,8 +2118,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
2114 property->num_values = num_values; 2118 property->num_values = num_values;
2115 INIT_LIST_HEAD(&property->enum_blob_list); 2119 INIT_LIST_HEAD(&property->enum_blob_list);
2116 2120
2117 if (name) 2121 if (name) {
2118 strncpy(property->name, name, DRM_PROP_NAME_LEN); 2122 strncpy(property->name, name, DRM_PROP_NAME_LEN);
2123 property->name[DRM_PROP_NAME_LEN-1] = '\0';
2124 }
2119 2125
2120 list_add_tail(&property->head, &dev->mode_config.property_list); 2126 list_add_tail(&property->head, &dev->mode_config.property_list);
2121 return property; 2127 return property;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 2957636161e8..3969f7553fe7 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -484,6 +484,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
484 struct drm_connector *save_connectors, *connector; 484 struct drm_connector *save_connectors, *connector;
485 int count = 0, ro, fail = 0; 485 int count = 0, ro, fail = 0;
486 struct drm_crtc_helper_funcs *crtc_funcs; 486 struct drm_crtc_helper_funcs *crtc_funcs;
487 struct drm_mode_set save_set;
487 int ret = 0; 488 int ret = 0;
488 int i; 489 int i;
489 490
@@ -556,6 +557,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
556 save_connectors[count++] = *connector; 557 save_connectors[count++] = *connector;
557 } 558 }
558 559
560 save_set.crtc = set->crtc;
561 save_set.mode = &set->crtc->mode;
562 save_set.x = set->crtc->x;
563 save_set.y = set->crtc->y;
564 save_set.fb = set->crtc->fb;
565
559 /* We should be able to check here if the fb has the same properties 566 /* We should be able to check here if the fb has the same properties
560 * and then just flip_or_move it */ 567 * and then just flip_or_move it */
561 if (set->crtc->fb != set->fb) { 568 if (set->crtc->fb != set->fb) {
@@ -721,6 +728,12 @@ fail:
721 *connector = save_connectors[count++]; 728 *connector = save_connectors[count++];
722 } 729 }
723 730
731 /* Try to restore the config */
732 if (mode_changed &&
733 !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
734 save_set.y, save_set.fb))
735 DRM_ERROR("failed to restore config after modeset failure\n");
736
724 kfree(save_connectors); 737 kfree(save_connectors);
725 kfree(save_encoders); 738 kfree(save_encoders);
726 kfree(save_crtcs); 739 kfree(save_crtcs);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index d067c12ba940..1c7a1c0d3edd 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -118,7 +118,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
118 tmp->minor = minor; 118 tmp->minor = minor;
119 tmp->dent = ent; 119 tmp->dent = ent;
120 tmp->info_ent = &files[i]; 120 tmp->info_ent = &files[i];
121 list_add(&(tmp->list), &(minor->debugfs_nodes.list)); 121
122 mutex_lock(&minor->debugfs_lock);
123 list_add(&tmp->list, &minor->debugfs_list);
124 mutex_unlock(&minor->debugfs_lock);
122 } 125 }
123 return 0; 126 return 0;
124 127
@@ -146,7 +149,8 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
146 char name[64]; 149 char name[64];
147 int ret; 150 int ret;
148 151
149 INIT_LIST_HEAD(&minor->debugfs_nodes.list); 152 INIT_LIST_HEAD(&minor->debugfs_list);
153 mutex_init(&minor->debugfs_lock);
150 sprintf(name, "%d", minor_id); 154 sprintf(name, "%d", minor_id);
151 minor->debugfs_root = debugfs_create_dir(name, root); 155 minor->debugfs_root = debugfs_create_dir(name, root);
152 if (!minor->debugfs_root) { 156 if (!minor->debugfs_root) {
@@ -192,8 +196,9 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
192 struct drm_info_node *tmp; 196 struct drm_info_node *tmp;
193 int i; 197 int i;
194 198
199 mutex_lock(&minor->debugfs_lock);
195 for (i = 0; i < count; i++) { 200 for (i = 0; i < count; i++) {
196 list_for_each_safe(pos, q, &minor->debugfs_nodes.list) { 201 list_for_each_safe(pos, q, &minor->debugfs_list) {
197 tmp = list_entry(pos, struct drm_info_node, list); 202 tmp = list_entry(pos, struct drm_info_node, list);
198 if (tmp->info_ent == &files[i]) { 203 if (tmp->info_ent == &files[i]) {
199 debugfs_remove(tmp->dent); 204 debugfs_remove(tmp->dent);
@@ -202,6 +207,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
202 } 207 }
203 } 208 }
204 } 209 }
210 mutex_unlock(&minor->debugfs_lock);
205 return 0; 211 return 0;
206} 212}
207EXPORT_SYMBOL(drm_debugfs_remove_files); 213EXPORT_SYMBOL(drm_debugfs_remove_files);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index fc81af9dbf42..40c187c60f44 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -125,7 +125,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
125 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 125 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
126 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 126 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
127 127
128 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), 128 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
129 129
130 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), 130 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
131 131
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index cb3794a00f98..68b756253f9f 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -407,13 +407,16 @@ int drm_irq_uninstall(struct drm_device *dev)
407 /* 407 /*
408 * Wake up any waiters so they don't hang. 408 * Wake up any waiters so they don't hang.
409 */ 409 */
410 spin_lock_irqsave(&dev->vbl_lock, irqflags); 410 if (dev->num_crtcs) {
411 for (i = 0; i < dev->num_crtcs; i++) { 411 spin_lock_irqsave(&dev->vbl_lock, irqflags);
412 DRM_WAKEUP(&dev->vbl_queue[i]); 412 for (i = 0; i < dev->num_crtcs; i++) {
413 dev->vblank_enabled[i] = 0; 413 DRM_WAKEUP(&dev->vbl_queue[i]);
414 dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i); 414 dev->vblank_enabled[i] = 0;
415 dev->last_vblank[i] =
416 dev->driver->get_vblank_counter(dev, i);
417 }
418 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
415 } 419 }
416 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
417 420
418 if (!irq_enabled) 421 if (!irq_enabled)
419 return -EINVAL; 422 return -EINVAL;
@@ -1125,6 +1128,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
1125 trace_drm_vblank_event_delivered(current->pid, pipe, 1128 trace_drm_vblank_event_delivered(current->pid, pipe,
1126 vblwait->request.sequence); 1129 vblwait->request.sequence);
1127 } else { 1130 } else {
1131 /* drm_handle_vblank_events will call drm_vblank_put */
1128 list_add_tail(&e->base.link, &dev->vblank_event_list); 1132 list_add_tail(&e->base.link, &dev->vblank_event_list);
1129 vblwait->reply.sequence = vblwait->request.sequence; 1133 vblwait->reply.sequence = vblwait->request.sequence;
1130 } 1134 }
@@ -1205,8 +1209,12 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1205 goto done; 1209 goto done;
1206 } 1210 }
1207 1211
1208 if (flags & _DRM_VBLANK_EVENT) 1212 if (flags & _DRM_VBLANK_EVENT) {
1213 /* must hold on to the vblank ref until the event fires
1214 * drm_vblank_put will be called asynchronously
1215 */
1209 return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); 1216 return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
1217 }
1210 1218
1211 if ((flags & _DRM_VBLANK_NEXTONMISS) && 1219 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
1212 (seq - vblwait->request.sequence) <= (1<<23)) { 1220 (seq - vblwait->request.sequence) <= (1<<23)) {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d14b44e13f51..4f40f1ce1d8e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1506,7 +1506,10 @@ drm_add_fake_info_node(struct drm_minor *minor,
1506 node->minor = minor; 1506 node->minor = minor;
1507 node->dent = ent; 1507 node->dent = ent;
1508 node->info_ent = (void *) key; 1508 node->info_ent = (void *) key;
1509 list_add(&node->list, &minor->debugfs_nodes.list); 1509
1510 mutex_lock(&minor->debugfs_lock);
1511 list_add(&node->list, &minor->debugfs_list);
1512 mutex_unlock(&minor->debugfs_lock);
1510 1513
1511 return 0; 1514 return 0;
1512} 1515}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cc531bb59c26..e9c2cfe45daa 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -789,8 +789,8 @@ static struct vm_operations_struct i915_gem_vm_ops = {
789}; 789};
790 790
791static struct drm_driver driver = { 791static struct drm_driver driver = {
792 /* don't use mtrr's here, the Xserver or user space app should 792 /* Don't use MTRRs here; the Xserver or userspace app should
793 * deal with them for intel hardware. 793 * deal with them for Intel hardware.
794 */ 794 */
795 .driver_features = 795 .driver_features =
796 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 796 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6651c36b6e8a..d18b07adcffa 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1396,7 +1396,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
1396 1396
1397 if (obj->base.size > dev_priv->mm.gtt_mappable_end) { 1397 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1398 ret = -E2BIG; 1398 ret = -E2BIG;
1399 goto unlock; 1399 goto out;
1400 } 1400 }
1401 1401
1402 if (obj->madv != I915_MADV_WILLNEED) { 1402 if (obj->madv != I915_MADV_WILLNEED) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 032a82098136..5fc201b49d30 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -640,10 +640,9 @@ static int
640nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) 640nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
641{ 641{
642 struct drm_nouveau_private *dev_priv = dev->dev_private; 642 struct drm_nouveau_private *dev_priv = dev->dev_private;
643 uint32_t reg0 = nv_rd32(dev, reg + 0);
644 uint32_t reg1 = nv_rd32(dev, reg + 4);
645 struct nouveau_pll_vals pll; 643 struct nouveau_pll_vals pll;
646 struct pll_lims pll_limits; 644 struct pll_lims pll_limits;
645 u32 ctrl, mask, coef;
647 int ret; 646 int ret;
648 647
649 ret = get_pll_limits(dev, reg, &pll_limits); 648 ret = get_pll_limits(dev, reg, &pll_limits);
@@ -654,15 +653,20 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
654 if (!clk) 653 if (!clk)
655 return -ERANGE; 654 return -ERANGE;
656 655
657 reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16); 656 coef = pll.N1 << 8 | pll.M1;
658 reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1; 657 ctrl = pll.log2P << 16;
659 658 mask = 0x00070000;
660 if (dev_priv->vbios.execute) { 659 if (reg == 0x004008) {
661 still_alive(); 660 mask |= 0x01f80000;
662 nv_wr32(dev, reg + 4, reg1); 661 ctrl |= (pll_limits.log2p_bias << 19);
663 nv_wr32(dev, reg + 0, reg0); 662 ctrl |= (pll.log2P << 22);
664 } 663 }
665 664
665 if (!dev_priv->vbios.execute)
666 return 0;
667
668 nv_mask(dev, reg + 0, mask, ctrl);
669 nv_wr32(dev, reg + 4, coef);
666 return 0; 670 return 0;
667} 671}
668 672
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7226f419e178..7cc37e690860 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -148,7 +148,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
148 148
149 if (dev_priv->card_type == NV_10 && 149 if (dev_priv->card_type == NV_10 &&
150 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && 150 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
151 nvbo->bo.mem.num_pages < vram_pages / 2) { 151 nvbo->bo.mem.num_pages < vram_pages / 4) {
152 /* 152 /*
153 * Make sure that the color and depth buffers are handled 153 * Make sure that the color and depth buffers are handled
154 * by independent memory controller units. Up to a 9x 154 * by independent memory controller units. Up to a 9x
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index a319d5646ea9..bb6ec9ef8676 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -158,6 +158,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
158 INIT_LIST_HEAD(&chan->nvsw.vbl_wait); 158 INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
159 INIT_LIST_HEAD(&chan->nvsw.flip); 159 INIT_LIST_HEAD(&chan->nvsw.flip);
160 INIT_LIST_HEAD(&chan->fence.pending); 160 INIT_LIST_HEAD(&chan->fence.pending);
161 spin_lock_init(&chan->fence.lock);
161 162
162 /* setup channel's memory and vm */ 163 /* setup channel's memory and vm */
163 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); 164 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index e0d275e1c96c..cea6696b1906 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -710,7 +710,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
710 case OUTPUT_DP: 710 case OUTPUT_DP:
711 max_clock = nv_encoder->dp.link_nr; 711 max_clock = nv_encoder->dp.link_nr;
712 max_clock *= nv_encoder->dp.link_bw; 712 max_clock *= nv_encoder->dp.link_bw;
713 clock = clock * nouveau_connector_bpp(connector) / 8; 713 clock = clock * nouveau_connector_bpp(connector) / 10;
714 break; 714 break;
715 default: 715 default:
716 BUG_ON(1); 716 BUG_ON(1);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 14a8627efe4d..3a4cc32b9e44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -487,6 +487,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
487{ 487{
488 struct drm_nouveau_private *dev_priv = dev->dev_private; 488 struct drm_nouveau_private *dev_priv = dev->dev_private;
489 struct nouveau_fbdev *nfbdev; 489 struct nouveau_fbdev *nfbdev;
490 int preferred_bpp;
490 int ret; 491 int ret;
491 492
492 nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); 493 nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
@@ -505,7 +506,15 @@ int nouveau_fbcon_init(struct drm_device *dev)
505 } 506 }
506 507
507 drm_fb_helper_single_add_all_connectors(&nfbdev->helper); 508 drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
508 drm_fb_helper_initial_config(&nfbdev->helper, 32); 509
510 if (dev_priv->vram_size <= 32 * 1024 * 1024)
511 preferred_bpp = 8;
512 else if (dev_priv->vram_size <= 64 * 1024 * 1024)
513 preferred_bpp = 16;
514 else
515 preferred_bpp = 32;
516
517 drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp);
509 return 0; 518 return 0;
510} 519}
511 520
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 81116cfea275..2f6daae68b9d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -539,8 +539,6 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
539 return ret; 539 return ret;
540 } 540 }
541 541
542 INIT_LIST_HEAD(&chan->fence.pending);
543 spin_lock_init(&chan->fence.lock);
544 atomic_set(&chan->fence.last_sequence_irq, 0); 542 atomic_set(&chan->fence.last_sequence_irq, 0);
545 return 0; 543 return 0;
546} 544}
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index c6143df48b9f..d39b2202b197 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -333,7 +333,7 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
333 333
334 NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); 334 NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
335 335
336 for (i = 0; info[i].addr; i++) { 336 for (i = 0; i2c && info[i].addr; i++) {
337 if (nouveau_probe_i2c_addr(i2c, info[i].addr) && 337 if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
338 (!match || match(i2c, &info[i]))) { 338 (!match || match(i2c, &info[i]))) {
339 NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); 339 NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 9f178aa94162..33d03fbf00df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -239,7 +239,7 @@ nouveau_perf_init(struct drm_device *dev)
239 if(version == 0x15) { 239 if(version == 0x15) {
240 memtimings->timing = 240 memtimings->timing =
241 kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); 241 kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
242 if(!memtimings) { 242 if (!memtimings->timing) {
243 NV_WARN(dev,"Could not allocate memtiming table\n"); 243 NV_WARN(dev,"Could not allocate memtiming table\n");
244 return; 244 return;
245 } 245 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 82478e0998e5..d8831ab42bb9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -579,6 +579,14 @@ nouveau_card_init(struct drm_device *dev)
579 if (ret) 579 if (ret)
580 goto out_display_early; 580 goto out_display_early;
581 581
582 /* workaround an odd issue on nvc1 by disabling the device's
583 * nosnoop capability. hopefully won't cause issues until a
584 * better fix is found - assuming there is one...
585 */
586 if (dev_priv->chipset == 0xc1) {
587 nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
588 }
589
582 nouveau_pm_init(dev); 590 nouveau_pm_init(dev);
583 591
584 ret = engine->vram.init(dev); 592 ret = engine->vram.init(dev);
@@ -1102,12 +1110,13 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
1102 dev_priv->noaccel = !!nouveau_noaccel; 1110 dev_priv->noaccel = !!nouveau_noaccel;
1103 if (nouveau_noaccel == -1) { 1111 if (nouveau_noaccel == -1) {
1104 switch (dev_priv->chipset) { 1112 switch (dev_priv->chipset) {
1105 case 0xc1: /* known broken */ 1113#if 0
1106 case 0xc8: /* never tested */ 1114 case 0xXX: /* known broken */
1107 NV_INFO(dev, "acceleration disabled by default, pass " 1115 NV_INFO(dev, "acceleration disabled by default, pass "
1108 "noaccel=0 to force enable\n"); 1116 "noaccel=0 to force enable\n");
1109 dev_priv->noaccel = true; 1117 dev_priv->noaccel = true;
1110 break; 1118 break;
1119#endif
1111 default: 1120 default:
1112 dev_priv->noaccel = false; 1121 dev_priv->noaccel = false;
1113 break; 1122 break;
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index bbc0b9c7e1f7..e676b0d53478 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -57,12 +57,14 @@ read_pll_2(struct drm_device *dev, u32 reg)
57 int P = (ctrl & 0x00070000) >> 16; 57 int P = (ctrl & 0x00070000) >> 16;
58 u32 ref = 27000, clk = 0; 58 u32 ref = 27000, clk = 0;
59 59
60 if (ctrl & 0x80000000) 60 if ((ctrl & 0x80000000) && M1) {
61 clk = ref * N1 / M1; 61 clk = ref * N1 / M1;
62 62 if ((ctrl & 0x40000100) == 0x40000000) {
63 if (!(ctrl & 0x00000100)) { 63 if (M2)
64 if (ctrl & 0x40000000) 64 clk = clk * N2 / M2;
65 clk = clk * N2 / M2; 65 else
66 clk = 0;
67 }
66 } 68 }
67 69
68 return clk >> P; 70 return clk >> P;
@@ -177,6 +179,11 @@ nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
177 } 179 }
178 180
179 /* memory clock */ 181 /* memory clock */
182 if (!perflvl->memory) {
183 info->mpll_ctrl = 0x00000000;
184 goto out;
185 }
186
180 ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory, 187 ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
181 &N1, &M1, &N2, &M2, &log2P); 188 &N1, &M1, &N2, &M2, &log2P);
182 if (ret < 0) 189 if (ret < 0)
@@ -264,6 +271,9 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
264 mdelay(5); 271 mdelay(5);
265 nv_mask(dev, 0x00c040, 0x00000333, info->ctrl); 272 nv_mask(dev, 0x00c040, 0x00000333, info->ctrl);
266 273
274 if (!info->mpll_ctrl)
275 goto resume;
276
267 /* wait for vblank start on active crtcs, disable memory access */ 277 /* wait for vblank start on active crtcs, disable memory access */
268 for (i = 0; i < 2; i++) { 278 for (i = 0; i < 2; i++) {
269 if (!(crtc_mask & (1 << i))) 279 if (!(crtc_mask & (1 << i)))
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8c979b31ff61..ac601f7c4e1a 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -131,8 +131,8 @@ nv50_graph_init(struct drm_device *dev, int engine)
131 NV_DEBUG(dev, "\n"); 131 NV_DEBUG(dev, "\n");
132 132
133 /* master reset */ 133 /* master reset */
134 nv_mask(dev, 0x000200, 0x00200100, 0x00000000); 134 nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
135 nv_mask(dev, 0x000200, 0x00200100, 0x00200100); 135 nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
136 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ 136 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
137 137
138 /* reset/enable traps and interrupts */ 138 /* reset/enable traps and interrupts */
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index d05c2c3b2444..4b46d6968566 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -601,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
601 gr_def(ctx, offset + 0x1c, 0x00880000); 601 gr_def(ctx, offset + 0x1c, 0x00880000);
602 break; 602 break;
603 case 0x86: 603 case 0x86:
604 gr_def(ctx, offset + 0x1c, 0x008c0000); 604 gr_def(ctx, offset + 0x1c, 0x018c0000);
605 break; 605 break;
606 case 0x92: 606 case 0x92:
607 case 0x96: 607 case 0x96:
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index 9da23838e63e..2e45e57fd869 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -160,7 +160,7 @@ nv50_vram_rblock(struct drm_device *dev)
160 colbits = (r4 & 0x0000f000) >> 12; 160 colbits = (r4 & 0x0000f000) >> 12;
161 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; 161 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
162 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; 162 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
163 banks = ((r4 & 0x01000000) ? 8 : 4); 163 banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
164 164
165 rowsize = parts * banks * (1 << colbits) * 8; 165 rowsize = parts * banks * (1 << colbits) * 8;
166 predicted = rowsize << rowbitsa; 166 predicted = rowsize << rowbitsa;
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index bbdbc51830c8..a74e501afd25 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -157,8 +157,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
157 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); 157 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
158 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; 158 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
159 struct drm_device *dev = chan->dev; 159 struct drm_device *dev = chan->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private;
160 int i = 0, gpc, tp, ret; 161 int i = 0, gpc, tp, ret;
161 u32 magic;
162 162
163 ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM, 163 ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
164 &grch->unk408004); 164 &grch->unk408004);
@@ -207,14 +207,37 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
207 nv_wo32(grch->mmio, i++ * 4, 0x0041880c); 207 nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
208 nv_wo32(grch->mmio, i++ * 4, 0x80000018); 208 nv_wo32(grch->mmio, i++ * 4, 0x80000018);
209 209
210 magic = 0x02180000; 210 if (dev_priv->chipset != 0xc1) {
211 nv_wo32(grch->mmio, i++ * 4, 0x00405830); 211 u32 magic = 0x02180000;
212 nv_wo32(grch->mmio, i++ * 4, magic); 212 nv_wo32(grch->mmio, i++ * 4, 0x00405830);
213 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 213 nv_wo32(grch->mmio, i++ * 4, magic);
214 for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) { 214 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
215 u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); 215 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
216 nv_wo32(grch->mmio, i++ * 4, reg); 216 u32 reg = TP_UNIT(gpc, tp, 0x520);
217 nv_wo32(grch->mmio, i++ * 4, magic); 217 nv_wo32(grch->mmio, i++ * 4, reg);
218 nv_wo32(grch->mmio, i++ * 4, magic);
219 magic += 0x0324;
220 }
221 }
222 } else {
223 u32 magic = 0x02180000;
224 nv_wo32(grch->mmio, i++ * 4, 0x00405830);
225 nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218);
226 nv_wo32(grch->mmio, i++ * 4, 0x004064c4);
227 nv_wo32(grch->mmio, i++ * 4, 0x0086ffff);
228 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
229 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
230 u32 reg = TP_UNIT(gpc, tp, 0x520);
231 nv_wo32(grch->mmio, i++ * 4, reg);
232 nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic);
233 magic += 0x0324;
234 }
235 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
236 u32 reg = TP_UNIT(gpc, tp, 0x544);
237 nv_wo32(grch->mmio, i++ * 4, reg);
238 nv_wo32(grch->mmio, i++ * 4, magic);
239 magic += 0x0324;
240 }
218 } 241 }
219 } 242 }
220 243
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index dd0e6a736b3b..96b0b93d94ca 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1812,6 +1812,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1812 /* calculate first set of magics */ 1812 /* calculate first set of magics */
1813 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); 1813 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1814 1814
1815 gpc = -1;
1815 for (tp = 0; tp < priv->tp_total; tp++) { 1816 for (tp = 0; tp < priv->tp_total; tp++) {
1816 do { 1817 do {
1817 gpc = (gpc + 1) % priv->gpc_nr; 1818 gpc = (gpc + 1) % priv->gpc_nr;
@@ -1861,30 +1862,26 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1861 1862
1862 if (1) { 1863 if (1) {
1863 u32 tp_mask = 0, tp_set = 0; 1864 u32 tp_mask = 0, tp_set = 0;
1864 u8 tpnr[GPC_MAX]; 1865 u8 tpnr[GPC_MAX], a, b;
1865 1866
1866 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); 1867 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1867 for (gpc = 0; gpc < priv->gpc_nr; gpc++) 1868 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
1868 tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8); 1869 tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
1869 1870
1870 gpc = -1; 1871 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
1871 for (i = 0, gpc = -1; i < 32; i++) { 1872 a = (i * (priv->tp_total - 1)) / 32;
1872 int ltp = i * (priv->tp_total - 1) / 32; 1873 if (a != b) {
1873 1874 b = a;
1874 do { 1875 do {
1875 gpc = (gpc + 1) % priv->gpc_nr; 1876 gpc = (gpc + 1) % priv->gpc_nr;
1876 } while (!tpnr[gpc]); 1877 } while (!tpnr[gpc]);
1877 tp = priv->tp_nr[gpc] - tpnr[gpc]--; 1878 tp = priv->tp_nr[gpc] - tpnr[gpc]--;
1878 1879
1879 tp_set |= 1 << ((gpc * 8) + tp); 1880 tp_set |= 1 << ((gpc * 8) + tp);
1881 }
1880 1882
1881 do { 1883 nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
1882 nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); 1884 nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask);
1883 tp_set ^= tp_mask;
1884 nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
1885 tp_set ^= tp_mask;
1886 } while (ltp == (++i * (priv->tp_total - 1) / 32));
1887 i--;
1888 } 1885 }
1889 } 1886 }
1890 1887
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index edbfe9360ae2..ce984d573a51 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -43,7 +43,7 @@ static const u8 types[256] = {
43 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
44 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0, 44 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
45 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 45 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
46 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 46 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
47 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3, 47 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
48 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, 48 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
49 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0 49 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
@@ -110,22 +110,26 @@ nvc0_vram_init(struct drm_device *dev)
110 u32 bsize = nv_rd32(dev, 0x10f20c); 110 u32 bsize = nv_rd32(dev, 0x10f20c);
111 u32 offset, length; 111 u32 offset, length;
112 bool uniform = true; 112 bool uniform = true;
113 int ret, i; 113 int ret, part;
114 114
115 NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800)); 115 NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
116 NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize); 116 NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize);
117 117
118 /* read amount of vram attached to each memory controller */ 118 /* read amount of vram attached to each memory controller */
119 for (i = 0; i < parts; i++) { 119 part = 0;
120 u32 psize = nv_rd32(dev, 0x11020c + (i * 0x1000)); 120 while (parts) {
121 u32 psize = nv_rd32(dev, 0x11020c + (part++ * 0x1000));
122 if (psize == 0)
123 continue;
124 parts--;
125
121 if (psize != bsize) { 126 if (psize != bsize) {
122 if (psize < bsize) 127 if (psize < bsize)
123 bsize = psize; 128 bsize = psize;
124 uniform = false; 129 uniform = false;
125 } 130 }
126 131
127 NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", i, psize); 132 NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
128
129 dev_priv->vram_size += (u64)psize << 20; 133 dev_priv->vram_size += (u64)psize << 20;
130 } 134 }
131 135
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 9f363e0c4b60..cf8b4bc3e73d 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -70,7 +70,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
70 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ 70 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
71 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ 71 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
72 evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ 72 evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
73 radeon_trace_points.o ni.o cayman_blit_shaders.o 73 radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o
74 74
75radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 75radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
76radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o 76radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a515b2a09d85..87631fede1f8 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -558,7 +558,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
558 bpc = connector->display_info.bpc; 558 bpc = connector->display_info.bpc;
559 encoder_mode = atombios_get_encoder_mode(encoder); 559 encoder_mode = atombios_get_encoder_mode(encoder);
560 if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || 560 if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
561 radeon_encoder_is_dp_bridge(encoder)) { 561 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
562 if (connector) { 562 if (connector) {
563 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 563 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
564 struct radeon_connector_atom_dig *dig_connector = 564 struct radeon_connector_atom_dig *dig_connector =
@@ -638,44 +638,29 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
638 if (ss_enabled && ss->percentage) 638 if (ss_enabled && ss->percentage)
639 args.v3.sInput.ucDispPllConfig |= 639 args.v3.sInput.ucDispPllConfig |=
640 DISPPLL_CONFIG_SS_ENABLE; 640 DISPPLL_CONFIG_SS_ENABLE;
641 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) || 641 if (ENCODER_MODE_IS_DP(encoder_mode)) {
642 radeon_encoder_is_dp_bridge(encoder)) { 642 args.v3.sInput.ucDispPllConfig |=
643 DISPPLL_CONFIG_COHERENT_MODE;
644 /* 16200 or 27000 */
645 args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
646 } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
643 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 647 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
644 if (encoder_mode == ATOM_ENCODER_MODE_DP) { 648 if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
649 /* deep color support */
650 args.v3.sInput.usPixelClock =
651 cpu_to_le16((mode->clock * bpc / 8) / 10);
652 if (dig->coherent_mode)
645 args.v3.sInput.ucDispPllConfig |= 653 args.v3.sInput.ucDispPllConfig |=
646 DISPPLL_CONFIG_COHERENT_MODE; 654 DISPPLL_CONFIG_COHERENT_MODE;
647 /* 16200 or 27000 */ 655 if (mode->clock > 165000)
648 args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
649 } else {
650 if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
651 /* deep color support */
652 args.v3.sInput.usPixelClock =
653 cpu_to_le16((mode->clock * bpc / 8) / 10);
654 }
655 if (dig->coherent_mode)
656 args.v3.sInput.ucDispPllConfig |=
657 DISPPLL_CONFIG_COHERENT_MODE;
658 if (mode->clock > 165000)
659 args.v3.sInput.ucDispPllConfig |=
660 DISPPLL_CONFIG_DUAL_LINK;
661 }
662 } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
663 if (encoder_mode == ATOM_ENCODER_MODE_DP) {
664 args.v3.sInput.ucDispPllConfig |= 656 args.v3.sInput.ucDispPllConfig |=
665 DISPPLL_CONFIG_COHERENT_MODE; 657 DISPPLL_CONFIG_DUAL_LINK;
666 /* 16200 or 27000 */
667 args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
668 } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
669 if (mode->clock > 165000)
670 args.v3.sInput.ucDispPllConfig |=
671 DISPPLL_CONFIG_DUAL_LINK;
672 }
673 } 658 }
674 if (radeon_encoder_is_dp_bridge(encoder)) { 659 if (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
675 struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); 660 ENCODER_OBJECT_ID_NONE)
676 struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); 661 args.v3.sInput.ucExtTransmitterID =
677 args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id; 662 radeon_encoder_get_dp_bridge_encoder_id(encoder);
678 } else 663 else
679 args.v3.sInput.ucExtTransmitterID = 0; 664 args.v3.sInput.ucExtTransmitterID = 0;
680 665
681 atom_execute_table(rdev->mode_info.atom_context, 666 atom_execute_table(rdev->mode_info.atom_context,
@@ -945,6 +930,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
945 bpc = connector->display_info.bpc; 930 bpc = connector->display_info.bpc;
946 931
947 switch (encoder_mode) { 932 switch (encoder_mode) {
933 case ATOM_ENCODER_MODE_DP_MST:
948 case ATOM_ENCODER_MODE_DP: 934 case ATOM_ENCODER_MODE_DP:
949 /* DP/eDP */ 935 /* DP/eDP */
950 dp_clock = dig_connector->dp_clock / 10; 936 dp_clock = dig_connector->dp_clock / 10;
@@ -1450,7 +1436,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1450 * PPLL/DCPLL programming and only program the DP DTO for the 1436 * PPLL/DCPLL programming and only program the DP DTO for the
1451 * crtc virtual pixel clock. 1437 * crtc virtual pixel clock.
1452 */ 1438 */
1453 if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) { 1439 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
1454 if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk) 1440 if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
1455 return ATOM_PPLL_INVALID; 1441 return ATOM_PPLL_INVALID;
1456 } 1442 }
@@ -1536,12 +1522,6 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
1536 struct drm_display_mode *mode, 1522 struct drm_display_mode *mode,
1537 struct drm_display_mode *adjusted_mode) 1523 struct drm_display_mode *adjusted_mode)
1538{ 1524{
1539 struct drm_device *dev = crtc->dev;
1540 struct radeon_device *rdev = dev->dev_private;
1541
1542 /* adjust pm to upcoming mode change */
1543 radeon_pm_compute_clocks(rdev);
1544
1545 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 1525 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
1546 return false; 1526 return false;
1547 return true; 1527 return true;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 79e8ebc05307..6fb335a4fdda 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -283,7 +283,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
283 } 283 }
284 } 284 }
285 285
286 DRM_ERROR("aux i2c too many retries, giving up\n"); 286 DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
287 return -EREMOTEIO; 287 return -EREMOTEIO;
288} 288}
289 289
@@ -482,7 +482,8 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
482 int bpp = convert_bpc_to_bpp(connector->display_info.bpc); 482 int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
483 int lane_num, max_pix_clock; 483 int lane_num, max_pix_clock;
484 484
485 if (radeon_connector_encoder_is_dp_bridge(connector)) 485 if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
486 ENCODER_OBJECT_ID_NUTMEG)
486 return 270000; 487 return 270000;
487 488
488 lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock); 489 lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
@@ -553,17 +554,32 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
553{ 554{
554 struct drm_device *dev = encoder->dev; 555 struct drm_device *dev = encoder->dev;
555 struct radeon_device *rdev = dev->dev_private; 556 struct radeon_device *rdev = dev->dev_private;
557 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
556 int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; 558 int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
557 559
558 if (!ASIC_IS_DCE4(rdev)) 560 if (!ASIC_IS_DCE4(rdev))
559 return; 561 return;
560 562
561 if (radeon_connector_encoder_is_dp_bridge(connector)) 563 if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
564 ENCODER_OBJECT_ID_NUTMEG)
562 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; 565 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
566 else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
567 ENCODER_OBJECT_ID_TRAVIS)
568 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
569 else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
570 u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
571 if (tmp & 1)
572 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
573 }
563 574
564 atombios_dig_encoder_setup(encoder, 575 atombios_dig_encoder_setup(encoder,
565 ATOM_ENCODER_CMD_SETUP_PANEL_MODE, 576 ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
566 panel_mode); 577 panel_mode);
578
579 if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
580 (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
581 radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
582 }
567} 583}
568 584
569void radeon_dp_set_link_config(struct drm_connector *connector, 585void radeon_dp_set_link_config(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
new file mode 100644
index 000000000000..39c04c1b8472
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -0,0 +1,2369 @@
1/*
2 * Copyright 2007-11 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "drm_crtc_helper.h"
28#include "radeon_drm.h"
29#include "radeon.h"
30#include "atom.h"
31
32extern int atom_debug;
33
34/* evil but including atombios.h is much worse */
35bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
36 struct drm_display_mode *mode);
37
38
39static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
40{
41 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
42 switch (radeon_encoder->encoder_id) {
43 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
44 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
45 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
46 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
47 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
48 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
49 case ENCODER_OBJECT_ID_INTERNAL_DDI:
50 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
51 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
52 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
53 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
54 return true;
55 default:
56 return false;
57 }
58}
59
60static struct drm_connector *
61radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
62{
63 struct drm_device *dev = encoder->dev;
64 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
65 struct drm_connector *connector;
66 struct radeon_connector *radeon_connector;
67
68 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
69 radeon_connector = to_radeon_connector(connector);
70 if (radeon_encoder->devices & radeon_connector->devices)
71 return connector;
72 }
73 return NULL;
74}
75
76static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
77 struct drm_display_mode *mode,
78 struct drm_display_mode *adjusted_mode)
79{
80 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
81 struct drm_device *dev = encoder->dev;
82 struct radeon_device *rdev = dev->dev_private;
83
84 /* set the active encoder to connector routing */
85 radeon_encoder_set_active_device(encoder);
86 drm_mode_set_crtcinfo(adjusted_mode, 0);
87
88 /* hw bug */
89 if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
90 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
91 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
92
93 /* get the native mode for LVDS */
94 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
95 radeon_panel_mode_fixup(encoder, adjusted_mode);
96
97 /* get the native mode for TV */
98 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
99 struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
100 if (tv_dac) {
101 if (tv_dac->tv_std == TV_STD_NTSC ||
102 tv_dac->tv_std == TV_STD_NTSC_J ||
103 tv_dac->tv_std == TV_STD_PAL_M)
104 radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
105 else
106 radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
107 }
108 }
109
110 if (ASIC_IS_DCE3(rdev) &&
111 ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
112 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
113 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
114 radeon_dp_set_link_config(connector, mode);
115 }
116
117 return true;
118}
119
120static void
121atombios_dac_setup(struct drm_encoder *encoder, int action)
122{
123 struct drm_device *dev = encoder->dev;
124 struct radeon_device *rdev = dev->dev_private;
125 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
126 DAC_ENCODER_CONTROL_PS_ALLOCATION args;
127 int index = 0;
128 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
129
130 memset(&args, 0, sizeof(args));
131
132 switch (radeon_encoder->encoder_id) {
133 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
134 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
135 index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
136 break;
137 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
138 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
139 index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
140 break;
141 }
142
143 args.ucAction = action;
144
145 if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
146 args.ucDacStandard = ATOM_DAC1_PS2;
147 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
148 args.ucDacStandard = ATOM_DAC1_CV;
149 else {
150 switch (dac_info->tv_std) {
151 case TV_STD_PAL:
152 case TV_STD_PAL_M:
153 case TV_STD_SCART_PAL:
154 case TV_STD_SECAM:
155 case TV_STD_PAL_CN:
156 args.ucDacStandard = ATOM_DAC1_PAL;
157 break;
158 case TV_STD_NTSC:
159 case TV_STD_NTSC_J:
160 case TV_STD_PAL_60:
161 default:
162 args.ucDacStandard = ATOM_DAC1_NTSC;
163 break;
164 }
165 }
166 args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
167
168 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
169
170}
171
172static void
173atombios_tv_setup(struct drm_encoder *encoder, int action)
174{
175 struct drm_device *dev = encoder->dev;
176 struct radeon_device *rdev = dev->dev_private;
177 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
178 TV_ENCODER_CONTROL_PS_ALLOCATION args;
179 int index = 0;
180 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
181
182 memset(&args, 0, sizeof(args));
183
184 index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
185
186 args.sTVEncoder.ucAction = action;
187
188 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
189 args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
190 else {
191 switch (dac_info->tv_std) {
192 case TV_STD_NTSC:
193 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
194 break;
195 case TV_STD_PAL:
196 args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
197 break;
198 case TV_STD_PAL_M:
199 args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
200 break;
201 case TV_STD_PAL_60:
202 args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
203 break;
204 case TV_STD_NTSC_J:
205 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
206 break;
207 case TV_STD_SCART_PAL:
208 args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
209 break;
210 case TV_STD_SECAM:
211 args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
212 break;
213 case TV_STD_PAL_CN:
214 args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
215 break;
216 default:
217 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
218 break;
219 }
220 }
221
222 args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
223
224 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
225
226}
227
228union dvo_encoder_control {
229 ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
230 DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
231 DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
232};
233
234void
235atombios_dvo_setup(struct drm_encoder *encoder, int action)
236{
237 struct drm_device *dev = encoder->dev;
238 struct radeon_device *rdev = dev->dev_private;
239 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
240 union dvo_encoder_control args;
241 int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
242 uint8_t frev, crev;
243
244 memset(&args, 0, sizeof(args));
245
246 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
247 return;
248
249 switch (frev) {
250 case 1:
251 switch (crev) {
252 case 1:
253 /* R4xx, R5xx */
254 args.ext_tmds.sXTmdsEncoder.ucEnable = action;
255
256 if (radeon_encoder->pixel_clock > 165000)
257 args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
258
259 args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
260 break;
261 case 2:
262 /* RS600/690/740 */
263 args.dvo.sDVOEncoder.ucAction = action;
264 args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
265 /* DFP1, CRT1, TV1 depending on the type of port */
266 args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
267
268 if (radeon_encoder->pixel_clock > 165000)
269 args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
270 break;
271 case 3:
272 /* R6xx */
273 args.dvo_v3.ucAction = action;
274 args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
275 args.dvo_v3.ucDVOConfig = 0; /* XXX */
276 break;
277 default:
278 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
279 break;
280 }
281 break;
282 default:
283 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
284 break;
285 }
286
287 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
288}
289
290union lvds_encoder_control {
291 LVDS_ENCODER_CONTROL_PS_ALLOCATION v1;
292 LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
293};
294
295void
296atombios_digital_setup(struct drm_encoder *encoder, int action)
297{
298 struct drm_device *dev = encoder->dev;
299 struct radeon_device *rdev = dev->dev_private;
300 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
301 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
302 union lvds_encoder_control args;
303 int index = 0;
304 int hdmi_detected = 0;
305 uint8_t frev, crev;
306
307 if (!dig)
308 return;
309
310 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
311 hdmi_detected = 1;
312
313 memset(&args, 0, sizeof(args));
314
315 switch (radeon_encoder->encoder_id) {
316 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
317 index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
318 break;
319 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
320 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
321 index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
322 break;
323 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
324 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
325 index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
326 else
327 index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
328 break;
329 }
330
331 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
332 return;
333
334 switch (frev) {
335 case 1:
336 case 2:
337 switch (crev) {
338 case 1:
339 args.v1.ucMisc = 0;
340 args.v1.ucAction = action;
341 if (hdmi_detected)
342 args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
343 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
344 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
345 if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
346 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
347 if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
348 args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
349 } else {
350 if (dig->linkb)
351 args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
352 if (radeon_encoder->pixel_clock > 165000)
353 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
354 /*if (pScrn->rgbBits == 8) */
355 args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
356 }
357 break;
358 case 2:
359 case 3:
360 args.v2.ucMisc = 0;
361 args.v2.ucAction = action;
362 if (crev == 3) {
363 if (dig->coherent_mode)
364 args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
365 }
366 if (hdmi_detected)
367 args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
368 args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
369 args.v2.ucTruncate = 0;
370 args.v2.ucSpatial = 0;
371 args.v2.ucTemporal = 0;
372 args.v2.ucFRC = 0;
373 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
374 if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
375 args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
376 if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
377 args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
378 if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
379 args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
380 }
381 if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
382 args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
383 if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
384 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
385 if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
386 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
387 }
388 } else {
389 if (dig->linkb)
390 args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
391 if (radeon_encoder->pixel_clock > 165000)
392 args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
393 }
394 break;
395 default:
396 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
397 break;
398 }
399 break;
400 default:
401 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
402 break;
403 }
404
405 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
406}
407
408int
409atombios_get_encoder_mode(struct drm_encoder *encoder)
410{
411 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
412 struct drm_device *dev = encoder->dev;
413 struct radeon_device *rdev = dev->dev_private;
414 struct drm_connector *connector;
415 struct radeon_connector *radeon_connector;
416 struct radeon_connector_atom_dig *dig_connector;
417
418 /* dp bridges are always DP */
419 if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
420 return ATOM_ENCODER_MODE_DP;
421
422 /* DVO is always DVO */
423 if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
424 return ATOM_ENCODER_MODE_DVO;
425
426 connector = radeon_get_connector_for_encoder(encoder);
427 /* if we don't have an active device yet, just use one of
428 * the connectors tied to the encoder.
429 */
430 if (!connector)
431 connector = radeon_get_connector_for_encoder_init(encoder);
432 radeon_connector = to_radeon_connector(connector);
433
434 switch (connector->connector_type) {
435 case DRM_MODE_CONNECTOR_DVII:
436 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
437 if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
438 /* fix me */
439 if (ASIC_IS_DCE4(rdev))
440 return ATOM_ENCODER_MODE_DVI;
441 else
442 return ATOM_ENCODER_MODE_HDMI;
443 } else if (radeon_connector->use_digital)
444 return ATOM_ENCODER_MODE_DVI;
445 else
446 return ATOM_ENCODER_MODE_CRT;
447 break;
448 case DRM_MODE_CONNECTOR_DVID:
449 case DRM_MODE_CONNECTOR_HDMIA:
450 default:
451 if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
452 /* fix me */
453 if (ASIC_IS_DCE4(rdev))
454 return ATOM_ENCODER_MODE_DVI;
455 else
456 return ATOM_ENCODER_MODE_HDMI;
457 } else
458 return ATOM_ENCODER_MODE_DVI;
459 break;
460 case DRM_MODE_CONNECTOR_LVDS:
461 return ATOM_ENCODER_MODE_LVDS;
462 break;
463 case DRM_MODE_CONNECTOR_DisplayPort:
464 dig_connector = radeon_connector->con_priv;
465 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
466 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
467 return ATOM_ENCODER_MODE_DP;
468 else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
469 /* fix me */
470 if (ASIC_IS_DCE4(rdev))
471 return ATOM_ENCODER_MODE_DVI;
472 else
473 return ATOM_ENCODER_MODE_HDMI;
474 } else
475 return ATOM_ENCODER_MODE_DVI;
476 break;
477 case DRM_MODE_CONNECTOR_eDP:
478 return ATOM_ENCODER_MODE_DP;
479 case DRM_MODE_CONNECTOR_DVIA:
480 case DRM_MODE_CONNECTOR_VGA:
481 return ATOM_ENCODER_MODE_CRT;
482 break;
483 case DRM_MODE_CONNECTOR_Composite:
484 case DRM_MODE_CONNECTOR_SVIDEO:
485 case DRM_MODE_CONNECTOR_9PinDIN:
486 /* fix me */
487 return ATOM_ENCODER_MODE_TV;
488 /*return ATOM_ENCODER_MODE_CV;*/
489 break;
490 }
491}
492
493/*
494 * DIG Encoder/Transmitter Setup
495 *
496 * DCE 3.0/3.1
497 * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
498 * Supports up to 3 digital outputs
499 * - 2 DIG encoder blocks.
500 * DIG1 can drive UNIPHY link A or link B
501 * DIG2 can drive UNIPHY link B or LVTMA
502 *
503 * DCE 3.2
504 * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
505 * Supports up to 5 digital outputs
506 * - 2 DIG encoder blocks.
507 * DIG1/2 can drive UNIPHY0/1/2 link A or link B
508 *
509 * DCE 4.0/5.0
510 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
511 * Supports up to 6 digital outputs
512 * - 6 DIG encoder blocks.
513 * - DIG to PHY mapping is hardcoded
514 * DIG1 drives UNIPHY0 link A, A+B
515 * DIG2 drives UNIPHY0 link B
516 * DIG3 drives UNIPHY1 link A, A+B
517 * DIG4 drives UNIPHY1 link B
518 * DIG5 drives UNIPHY2 link A, A+B
519 * DIG6 drives UNIPHY2 link B
520 *
521 * DCE 4.1
522 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
523 * Supports up to 6 digital outputs
524 * - 2 DIG encoder blocks.
525 * DIG1/2 can drive UNIPHY0/1/2 link A or link B
526 *
527 * Routing
528 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
529 * Examples:
530 * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
531 * crtc1 -> dig1 -> UNIPHY0 link B -> DP
532 * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
533 * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
534 */
535
536union dig_encoder_control {
537 DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
538 DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
539 DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
540 DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
541};
542
543void
544atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
545{
546 struct drm_device *dev = encoder->dev;
547 struct radeon_device *rdev = dev->dev_private;
548 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
549 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
550 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
551 union dig_encoder_control args;
552 int index = 0;
553 uint8_t frev, crev;
554 int dp_clock = 0;
555 int dp_lane_count = 0;
556 int hpd_id = RADEON_HPD_NONE;
557 int bpc = 8;
558
559 if (connector) {
560 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
561 struct radeon_connector_atom_dig *dig_connector =
562 radeon_connector->con_priv;
563
564 dp_clock = dig_connector->dp_clock;
565 dp_lane_count = dig_connector->dp_lane_count;
566 hpd_id = radeon_connector->hpd.hpd;
567 bpc = connector->display_info.bpc;
568 }
569
570 /* no dig encoder assigned */
571 if (dig->dig_encoder == -1)
572 return;
573
574 memset(&args, 0, sizeof(args));
575
576 if (ASIC_IS_DCE4(rdev))
577 index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
578 else {
579 if (dig->dig_encoder)
580 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
581 else
582 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
583 }
584
585 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
586 return;
587
588 switch (frev) {
589 case 1:
590 switch (crev) {
591 case 1:
592 args.v1.ucAction = action;
593 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
594 if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
595 args.v3.ucPanelMode = panel_mode;
596 else
597 args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
598
599 if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
600 args.v1.ucLaneNum = dp_lane_count;
601 else if (radeon_encoder->pixel_clock > 165000)
602 args.v1.ucLaneNum = 8;
603 else
604 args.v1.ucLaneNum = 4;
605
606 if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
607 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
608 switch (radeon_encoder->encoder_id) {
609 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
610 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
611 break;
612 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
613 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
614 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
615 break;
616 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
617 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
618 break;
619 }
620 if (dig->linkb)
621 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
622 else
623 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
624 break;
625 case 2:
626 case 3:
627 args.v3.ucAction = action;
628 args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
629 if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
630 args.v3.ucPanelMode = panel_mode;
631 else
632 args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder);
633
634 if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
635 args.v3.ucLaneNum = dp_lane_count;
636 else if (radeon_encoder->pixel_clock > 165000)
637 args.v3.ucLaneNum = 8;
638 else
639 args.v3.ucLaneNum = 4;
640
641 if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
642 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
643 args.v3.acConfig.ucDigSel = dig->dig_encoder;
644 switch (bpc) {
645 case 0:
646 args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
647 break;
648 case 6:
649 args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
650 break;
651 case 8:
652 default:
653 args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
654 break;
655 case 10:
656 args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
657 break;
658 case 12:
659 args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
660 break;
661 case 16:
662 args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
663 break;
664 }
665 break;
666 case 4:
667 args.v4.ucAction = action;
668 args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
669 if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
670 args.v4.ucPanelMode = panel_mode;
671 else
672 args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder);
673
674 if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
675 args.v4.ucLaneNum = dp_lane_count;
676 else if (radeon_encoder->pixel_clock > 165000)
677 args.v4.ucLaneNum = 8;
678 else
679 args.v4.ucLaneNum = 4;
680
681 if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) {
682 if (dp_clock == 270000)
683 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
684 else if (dp_clock == 540000)
685 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
686 }
687 args.v4.acConfig.ucDigSel = dig->dig_encoder;
688 switch (bpc) {
689 case 0:
690 args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
691 break;
692 case 6:
693 args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
694 break;
695 case 8:
696 default:
697 args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
698 break;
699 case 10:
700 args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
701 break;
702 case 12:
703 args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
704 break;
705 case 16:
706 args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
707 break;
708 }
709 if (hpd_id == RADEON_HPD_NONE)
710 args.v4.ucHPD_ID = 0;
711 else
712 args.v4.ucHPD_ID = hpd_id + 1;
713 break;
714 default:
715 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
716 break;
717 }
718 break;
719 default:
720 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
721 break;
722 }
723
724 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
725
726}
727
/* Per-revision argument layouts for the DIG TransmitterControl atom
 * command table; the member to fill (v1-v4) is selected at runtime from
 * the revision reported by atom_parse_cmd_header().
 */
union dig_transmitter_control {
	DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
};
734
/*
 * atombios_dig_transmitter_setup - program a DIG transmitter (PHY)
 * through the matching atom command table.
 *
 * @encoder:  DRM encoder whose transmitter is being configured
 * @action:   ATOM_TRANSMITTER_ACTION_* opcode (INIT, SETUP_VSEMPH,
 *            enable/disable, etc.)
 * @lane_num: lane selector, consumed only for SETUP_VSEMPH
 * @lane_set: swing/pre-emphasis setting, consumed only for SETUP_VSEMPH
 *
 * Selects DVO/UNIPHY/LVTMA TransmitterControl by encoder object id and
 * fills the union member matching the table revision (v1-v4).  Bails
 * out early if no DIG encoder has been assigned or the table header
 * cannot be parsed.
 */
void
atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	struct drm_connector *connector;
	union dig_transmitter_control args;
	int index = 0;
	uint8_t frev, crev;
	bool is_dp = false;
	int pll_id = 0;
	int dp_clock = 0;
	int dp_lane_count = 0;
	int connector_object_id = 0;
	int igp_lane_info = 0;
	int dig_encoder = dig->dig_encoder;

	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
		connector = radeon_get_connector_for_encoder_init(encoder);
		/* just needed to avoid bailing in the encoder check. the encoder
		 * isn't used for init
		 */
		dig_encoder = 0;
	} else
		connector = radeon_get_connector_for_encoder(encoder);

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		struct radeon_connector_atom_dig *dig_connector =
			radeon_connector->con_priv;

		/* pull DP link parameters and IGP lane routing off the connector */
		dp_clock = dig_connector->dp_clock;
		dp_lane_count = dig_connector->dp_lane_count;
		connector_object_id =
			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
		igp_lane_info = dig_connector->igp_lane_info;
	}

	/* PLL feeding this PHY, when a CRTC is already attached */
	if (encoder->crtc) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
		pll_id = radeon_crtc->pll_id;
	}

	/* no dig encoder assigned */
	if (dig_encoder == -1)
		return;

	if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)))
		is_dp = true;

	memset(&args, 0, sizeof(args));

	/* pick the command table matching this transmitter object */
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
		break;
	}

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
			args.v1.ucAction = action;
			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
				args.v1.usInitInfo = cpu_to_le16(connector_object_id);
			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
				args.v1.asMode.ucLaneSel = lane_num;
				args.v1.asMode.ucLaneSet = lane_set;
			} else {
				/* DP uses the link clock; dual-link DVI halves the
				 * pixel clock per link
				 */
				if (is_dp)
					args.v1.usPixelClock =
						cpu_to_le16(dp_clock / 10);
				else if (radeon_encoder->pixel_clock > 165000)
					args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
				else
					args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			}

			args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;

			if (dig_encoder)
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
			else
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;

			/* IGP boards route a subset of the lanes to the connector;
			 * igp_lane_info is a bitmask of 4-lane groups
			 */
			if ((rdev->flags & RADEON_IS_IGP) &&
			    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
				if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
					if (igp_lane_info & 0x1)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
					else if (igp_lane_info & 0x2)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
					else if (igp_lane_info & 0x4)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
					else if (igp_lane_info & 0x8)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
				} else {
					if (igp_lane_info & 0x3)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
					else if (igp_lane_info & 0xc)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
				}
			}

			if (dig->linkb)
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
			else
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;

			if (is_dp)
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
				if (dig->coherent_mode)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
				if (radeon_encoder->pixel_clock > 165000)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
			}
			break;
		case 2:
			args.v2.ucAction = action;
			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
				args.v2.usInitInfo = cpu_to_le16(connector_object_id);
			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
				args.v2.asMode.ucLaneSel = lane_num;
				args.v2.asMode.ucLaneSet = lane_set;
			} else {
				if (is_dp)
					args.v2.usPixelClock =
						cpu_to_le16(dp_clock / 10);
				else if (radeon_encoder->pixel_clock > 165000)
					args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
				else
					args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			}

			args.v2.acConfig.ucEncoderSel = dig_encoder;
			if (dig->linkb)
				args.v2.acConfig.ucLinkSel = 1;

			switch (radeon_encoder->encoder_id) {
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
				args.v2.acConfig.ucTransmitterSel = 0;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
				args.v2.acConfig.ucTransmitterSel = 1;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
				args.v2.acConfig.ucTransmitterSel = 2;
				break;
			}

			if (is_dp) {
				args.v2.acConfig.fCoherentMode = 1;
				args.v2.acConfig.fDPConnector = 1;
			} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
				if (dig->coherent_mode)
					args.v2.acConfig.fCoherentMode = 1;
				if (radeon_encoder->pixel_clock > 165000)
					args.v2.acConfig.fDualLinkConnector = 1;
			}
			break;
		case 3:
			args.v3.ucAction = action;
			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
				args.v3.usInitInfo = cpu_to_le16(connector_object_id);
			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
				args.v3.asMode.ucLaneSel = lane_num;
				args.v3.asMode.ucLaneSet = lane_set;
			} else {
				if (is_dp)
					args.v3.usPixelClock =
						cpu_to_le16(dp_clock / 10);
				else if (radeon_encoder->pixel_clock > 165000)
					args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
				else
					args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			}

			if (is_dp)
				args.v3.ucLaneNum = dp_lane_count;
			else if (radeon_encoder->pixel_clock > 165000)
				args.v3.ucLaneNum = 8;
			else
				args.v3.ucLaneNum = 4;

			if (dig->linkb)
				args.v3.acConfig.ucLinkSel = 1;
			if (dig_encoder & 1)
				args.v3.acConfig.ucEncoderSel = 1;

			/* Select the PLL for the PHY
			 * DP PHY should be clocked from external src if there is
			 * one.
			 */
			/* On DCE4, if there is an external clock, it generates the DP ref clock */
			if (is_dp && rdev->clock.dp_extclk)
				args.v3.acConfig.ucRefClkSource = 2; /* external src */
			else
				args.v3.acConfig.ucRefClkSource = pll_id;

			switch (radeon_encoder->encoder_id) {
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
				args.v3.acConfig.ucTransmitterSel = 0;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
				args.v3.acConfig.ucTransmitterSel = 1;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
				args.v3.acConfig.ucTransmitterSel = 2;
				break;
			}

			if (is_dp)
				args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
				if (dig->coherent_mode)
					args.v3.acConfig.fCoherentMode = 1;
				if (radeon_encoder->pixel_clock > 165000)
					args.v3.acConfig.fDualLinkConnector = 1;
			}
			break;
		case 4:
			args.v4.ucAction = action;
			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
				args.v4.usInitInfo = cpu_to_le16(connector_object_id);
			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
				args.v4.asMode.ucLaneSel = lane_num;
				args.v4.asMode.ucLaneSet = lane_set;
			} else {
				if (is_dp)
					args.v4.usPixelClock =
						cpu_to_le16(dp_clock / 10);
				else if (radeon_encoder->pixel_clock > 165000)
					args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
				else
					args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			}

			if (is_dp)
				args.v4.ucLaneNum = dp_lane_count;
			else if (radeon_encoder->pixel_clock > 165000)
				args.v4.ucLaneNum = 8;
			else
				args.v4.ucLaneNum = 4;

			if (dig->linkb)
				args.v4.acConfig.ucLinkSel = 1;
			if (dig_encoder & 1)
				args.v4.acConfig.ucEncoderSel = 1;

			/* Select the PLL for the PHY
			 * DP PHY should be clocked from external src if there is
			 * one.
			 */
			/* On DCE5 DCPLL usually generates the DP ref clock */
			if (is_dp) {
				if (rdev->clock.dp_extclk)
					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
				else
					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
			} else
				args.v4.acConfig.ucRefClkSource = pll_id;

			switch (radeon_encoder->encoder_id) {
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
				args.v4.acConfig.ucTransmitterSel = 0;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
				args.v4.acConfig.ucTransmitterSel = 1;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
				args.v4.acConfig.ucTransmitterSel = 2;
				break;
			}

			if (is_dp)
				args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */
			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
				if (dig->coherent_mode)
					args.v4.acConfig.fCoherentMode = 1;
				if (radeon_encoder->pixel_clock > 165000)
					args.v4.acConfig.fDualLinkConnector = 1;
			}
			break;
		default:
			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
			break;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		break;
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
1044
1045bool
1046atombios_set_edp_panel_power(struct drm_connector *connector, int action)
1047{
1048 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1049 struct drm_device *dev = radeon_connector->base.dev;
1050 struct radeon_device *rdev = dev->dev_private;
1051 union dig_transmitter_control args;
1052 int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
1053 uint8_t frev, crev;
1054
1055 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
1056 goto done;
1057
1058 if (!ASIC_IS_DCE4(rdev))
1059 goto done;
1060
1061 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
1062 (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
1063 goto done;
1064
1065 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
1066 goto done;
1067
1068 memset(&args, 0, sizeof(args));
1069
1070 args.v1.ucAction = action;
1071
1072 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1073
1074 /* wait for the panel to power up */
1075 if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
1076 int i;
1077
1078 for (i = 0; i < 300; i++) {
1079 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
1080 return true;
1081 mdelay(1);
1082 }
1083 return false;
1084 }
1085done:
1086 return true;
1087}
1088
/* Per-revision argument layouts for the ExternalEncoderControl atom
 * command table (v1 and v3 are the only layouts used here).
 */
union external_encoder_control {
	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
};
1093
/*
 * atombios_external_encoder_setup - configure an external encoder chip
 * attached to @encoder through the ExternalEncoderControl atom table.
 *
 * @encoder:     the internal encoder driving the external one
 * @ext_encoder: the external encoder object being programmed
 * @action:      EXTERNAL_ENCODER_ACTION_* / ATOM_ENABLE-style opcode
 *
 * The argument layout depends on the table revision reported by
 * atom_parse_cmd_header(); frev 1 takes no parameters at all.  Link
 * rate, lane count and panel bpc are taken from the connector when one
 * is attached.
 */
static void
atombios_external_encoder_setup(struct drm_encoder *encoder,
				struct drm_encoder *ext_encoder,
				int action)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
	union external_encoder_control args;
	struct drm_connector *connector;
	int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
	u8 frev, crev;
	int dp_clock = 0;
	int dp_lane_count = 0;
	int connector_object_id = 0;
	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
	int bpc = 8;

	/* during init the normal encoder<->connector mapping may not exist yet */
	if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
		connector = radeon_get_connector_for_encoder_init(encoder);
	else
		connector = radeon_get_connector_for_encoder(encoder);

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		struct radeon_connector_atom_dig *dig_connector =
			radeon_connector->con_priv;

		dp_clock = dig_connector->dp_clock;
		dp_lane_count = dig_connector->dp_lane_count;
		connector_object_id =
			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
		bpc = connector->display_info.bpc;
	}

	memset(&args, 0, sizeof(args));

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	switch (frev) {
	case 1:
		/* no params on frev 1 */
		break;
	case 2:
		switch (crev) {
		case 1:
		case 2:
			args.v1.sDigEncoder.ucAction = action;
			args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);

			if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) {
				if (dp_clock == 270000)
					args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
				args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
			} else if (radeon_encoder->pixel_clock > 165000)
				args.v1.sDigEncoder.ucLaneNum = 8;
			else
				args.v1.sDigEncoder.ucLaneNum = 4;
			break;
		case 3:
			args.v3.sExtEncoder.ucAction = action;
			/* INIT passes the connector id instead of a pixel clock */
			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
				args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
			else
				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);

			if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) {
				if (dp_clock == 270000)
					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
				else if (dp_clock == 540000)
					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
				args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
			} else if (radeon_encoder->pixel_clock > 165000)
				args.v3.sExtEncoder.ucLaneNum = 8;
			else
				args.v3.sExtEncoder.ucLaneNum = 4;
			/* which instance of the external encoder object this is */
			switch (ext_enum) {
			case GRAPH_OBJECT_ENUM_ID1:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
				break;
			case GRAPH_OBJECT_ENUM_ID2:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
				break;
			case GRAPH_OBJECT_ENUM_ID3:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
				break;
			}
			/* panel bits-per-color; unknown values fall back to 8bpc */
			switch (bpc) {
			case 0:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
				break;
			case 6:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
				break;
			case 8:
			default:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
				break;
			case 10:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
				break;
			case 12:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
				break;
			case 16:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
				break;
			}
			break;
		default:
			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
			return;
		}
		break;
	default:
		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
		return;
	}
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
1218
1219static void
1220atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
1221{
1222 struct drm_device *dev = encoder->dev;
1223 struct radeon_device *rdev = dev->dev_private;
1224 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1225 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1226 ENABLE_YUV_PS_ALLOCATION args;
1227 int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
1228 uint32_t temp, reg;
1229
1230 memset(&args, 0, sizeof(args));
1231
1232 if (rdev->family >= CHIP_R600)
1233 reg = R600_BIOS_3_SCRATCH;
1234 else
1235 reg = RADEON_BIOS_3_SCRATCH;
1236
1237 /* XXX: fix up scratch reg handling */
1238 temp = RREG32(reg);
1239 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
1240 WREG32(reg, (ATOM_S3_TV1_ACTIVE |
1241 (radeon_crtc->crtc_id << 18)));
1242 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
1243 WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
1244 else
1245 WREG32(reg, 0);
1246
1247 if (enable)
1248 args.ucEnable = ATOM_ENABLE;
1249 args.ucCRTC = radeon_crtc->crtc_id;
1250
1251 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1252
1253 WREG32(reg, temp);
1254}
1255
/*
 * radeon_atom_encoder_dpms_avivo - DPMS for legacy (non-DIG) encoders:
 * TMDS, DVO, LVDS and the DACs.
 *
 * Picks the *OutputControl atom table matching the encoder object id
 * (DACs select TV/CV/CRT control based on the active device) and runs
 * it with ATOM_ENABLE or ATOM_DISABLE.  LCD devices get an additional
 * backlight pass (ATOM_LCD_BLON / ATOM_LCD_BLOFF).
 */
static void
radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
	int index = 0;

	memset(&args, 0, sizeof(args));

	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
		index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
		index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		/* LVTM1 drives either the LCD or a DFP depending on wiring */
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
		/* DAC table choice follows the currently active device */
		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
		break;
	default:
		return;
	}

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		args.ucAction = ATOM_ENABLE;
		/* workaround for DVOOutputControl on some RS690 systems */
		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
			u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
			/* clear DFP2I around the table call, then restore */
			WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
			WREG32(RADEON_BIOS_3_SCRATCH, reg);
		} else
			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			/* second pass turns the LCD backlight on */
			args.ucAction = ATOM_LCD_BLON;
			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
		}
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		args.ucAction = ATOM_DISABLE;
		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			args.ucAction = ATOM_LCD_BLOFF;
			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
		}
		break;
	}
}
1336
/*
 * radeon_atom_encoder_dpms_dig - DPMS for DIG (UNIPHY/LVTMA) encoders.
 *
 * On power-on: enable the transmitter, then for DP sequence eDP panel
 * power, (DCE4) video-stream off, link training, and (DCE4) video-
 * stream on, finally turning on the LCD backlight.  Power-off performs
 * the reverse.  The ordering of these calls is significant.
 */
static void
radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	struct radeon_connector *radeon_connector = NULL;
	struct radeon_connector_atom_dig *radeon_dig_connector = NULL;

	if (connector) {
		radeon_connector = to_radeon_connector(connector);
		radeon_dig_connector = radeon_connector->con_priv;
	}

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		/* some early dce3.2 boards have a bug in their transmitter control table */
		if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
		else
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
			/* eDP panels must be powered before link training */
			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
				atombios_set_edp_panel_power(connector,
							     ATOM_TRANSMITTER_ACTION_POWER_ON);
				radeon_dig_connector->edp_on = true;
			}
			if (ASIC_IS_DCE4(rdev))
				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
			radeon_dp_link_train(encoder, connector);
			if (ASIC_IS_DCE4(rdev))
				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
		}
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
			if (ASIC_IS_DCE4(rdev))
				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
				atombios_set_edp_panel_power(connector,
							     ATOM_TRANSMITTER_ACTION_POWER_OFF);
				radeon_dig_connector->edp_on = false;
			}
		}
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
		break;
	}
}
1392
1393static void
1394radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder,
1395 struct drm_encoder *ext_encoder,
1396 int mode)
1397{
1398 struct drm_device *dev = encoder->dev;
1399 struct radeon_device *rdev = dev->dev_private;
1400
1401 switch (mode) {
1402 case DRM_MODE_DPMS_ON:
1403 default:
1404 if (ASIC_IS_DCE41(rdev)) {
1405 atombios_external_encoder_setup(encoder, ext_encoder,
1406 EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
1407 atombios_external_encoder_setup(encoder, ext_encoder,
1408 EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
1409 } else
1410 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
1411 break;
1412 case DRM_MODE_DPMS_STANDBY:
1413 case DRM_MODE_DPMS_SUSPEND:
1414 case DRM_MODE_DPMS_OFF:
1415 if (ASIC_IS_DCE41(rdev)) {
1416 atombios_external_encoder_setup(encoder, ext_encoder,
1417 EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
1418 atombios_external_encoder_setup(encoder, ext_encoder,
1419 EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
1420 } else
1421 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
1422 break;
1423 }
1424}
1425
/*
 * radeon_atom_encoder_dpms - top-level DPMS entry point for atombios
 * encoders.
 *
 * Dispatches on the encoder object id to the AVIVO (legacy) or DIG
 * helper, with special cases: KLDSCP_DVO1 uses the DVO setup table on
 * DCE5 and the DIG path on DCE3; DAC1 uses the DAC setup table on
 * DCE5.  Afterwards any external encoder is driven to the same state
 * and the BIOS scratch registers are updated.
 */
static void
radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);

	DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
		  radeon_encoder->encoder_id, mode, radeon_encoder->devices,
		  radeon_encoder->active_device);
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		radeon_atom_encoder_dpms_avivo(encoder, mode);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		radeon_atom_encoder_dpms_dig(encoder, mode);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		if (ASIC_IS_DCE5(rdev)) {
			/* DCE5 has a dedicated DVO setup table */
			switch (mode) {
			case DRM_MODE_DPMS_ON:
				atombios_dvo_setup(encoder, ATOM_ENABLE);
				break;
			case DRM_MODE_DPMS_STANDBY:
			case DRM_MODE_DPMS_SUSPEND:
			case DRM_MODE_DPMS_OFF:
				atombios_dvo_setup(encoder, ATOM_DISABLE);
				break;
			}
		} else if (ASIC_IS_DCE3(rdev))
			radeon_atom_encoder_dpms_dig(encoder, mode);
		else
			radeon_atom_encoder_dpms_avivo(encoder, mode);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
		if (ASIC_IS_DCE5(rdev)) {
			/* DCE5 has a dedicated DAC setup table */
			switch (mode) {
			case DRM_MODE_DPMS_ON:
				atombios_dac_setup(encoder, ATOM_ENABLE);
				break;
			case DRM_MODE_DPMS_STANDBY:
			case DRM_MODE_DPMS_SUSPEND:
			case DRM_MODE_DPMS_OFF:
				atombios_dac_setup(encoder, ATOM_DISABLE);
				break;
			}
		} else
			radeon_atom_encoder_dpms_avivo(encoder, mode);
		break;
	default:
		return;
	}

	if (ext_encoder)
		radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode);

	radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);

}
1497
/* Per-revision argument layouts for the SelectCRTC_Source atom
 * command table.
 */
union crtc_source_param {
	SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
	SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
};
1502
1503static void
1504atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1505{
1506 struct drm_device *dev = encoder->dev;
1507 struct radeon_device *rdev = dev->dev_private;
1508 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1509 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1510 union crtc_source_param args;
1511 int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
1512 uint8_t frev, crev;
1513 struct radeon_encoder_atom_dig *dig;
1514
1515 memset(&args, 0, sizeof(args));
1516
1517 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
1518 return;
1519
1520 switch (frev) {
1521 case 1:
1522 switch (crev) {
1523 case 1:
1524 default:
1525 if (ASIC_IS_AVIVO(rdev))
1526 args.v1.ucCRTC = radeon_crtc->crtc_id;
1527 else {
1528 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
1529 args.v1.ucCRTC = radeon_crtc->crtc_id;
1530 } else {
1531 args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
1532 }
1533 }
1534 switch (radeon_encoder->encoder_id) {
1535 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
1536 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
1537 args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
1538 break;
1539 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
1540 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
1541 if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
1542 args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
1543 else
1544 args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
1545 break;
1546 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1547 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1548 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1549 args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
1550 break;
1551 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1552 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
1553 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
1554 args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
1555 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
1556 args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
1557 else
1558 args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
1559 break;
1560 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
1561 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
1562 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
1563 args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
1564 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
1565 args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
1566 else
1567 args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
1568 break;
1569 }
1570 break;
1571 case 2:
1572 args.v2.ucCRTC = radeon_crtc->crtc_id;
1573 if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
1574 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1575
1576 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
1577 args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
1578 else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
1579 args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
1580 else
1581 args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
1582 } else
1583 args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
1584 switch (radeon_encoder->encoder_id) {
1585 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1586 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1587 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1588 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1589 dig = radeon_encoder->enc_priv;
1590 switch (dig->dig_encoder) {
1591 case 0:
1592 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1593 break;
1594 case 1:
1595 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1596 break;
1597 case 2:
1598 args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
1599 break;
1600 case 3:
1601 args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
1602 break;
1603 case 4:
1604 args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
1605 break;
1606 case 5:
1607 args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
1608 break;
1609 }
1610 break;
1611 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1612 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
1613 break;
1614 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
1615 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
1616 args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
1617 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
1618 args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
1619 else
1620 args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
1621 break;
1622 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
1623 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
1624 args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
1625 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
1626 args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
1627 else
1628 args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
1629 break;
1630 }
1631 break;
1632 }
1633 break;
1634 default:
1635 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
1636 return;
1637 }
1638
1639 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1640
1641 /* update scratch regs with new routing */
1642 radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
1643}
1644
/*
 * Apply board- and chip-specific fixups after an encoder mode set.
 * Called from the mode_set path with the already-adjusted mode.
 */
static void
atombios_apply_encoder_quirks(struct drm_encoder *encoder,
			      struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);

	/* Funky macbooks: on this specific Apple board (device 0x71C5,
	 * subsystem 106b:0080) force truncation and spatial dithering off
	 * on the LVDS panel path.
	 */
	if ((dev->pdev->device == 0x71C5) &&
	    (dev->pdev->subsystem_vendor == 0x106b) &&
	    (dev->pdev->subsystem_device == 0x0080)) {
		if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
			uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);

			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;

			WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
		}
	}

	/* set scaler clears this on some chips: re-program the interlace
	 * data-format bit for the CRTC (skipped for TV outputs).
	 */
	if (ASIC_IS_AVIVO(rdev) &&
	    (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
		if (ASIC_IS_DCE4(rdev)) {
			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
				       EVERGREEN_INTERLEAVE_EN);
			else
				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
		} else {
			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
				       AVIVO_D1MODE_INTERLEAVE_EN);
			else
				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
		}
	}
}
1686
/*
 * Pick which DIG encoder block should drive this encoder.
 * Returns the DIG block index; the mapping rules differ per
 * display-engine generation (DCE3 / DCE3.2 / DCE4 / DCE4.1).
 */
static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_encoder *test_encoder;
	struct radeon_encoder_atom_dig *dig;
	/* bitmask of DIG blocks already claimed by other digital encoders */
	uint32_t dig_enc_in_use = 0;

	/* DCE4/5: fixed mapping from UNIPHY instance + link to DIG block */
	if (ASIC_IS_DCE4(rdev)) {
		dig = radeon_encoder->enc_priv;
		if (ASIC_IS_DCE41(rdev)) {
			/* ontario follows DCE4 */
			if (rdev->family == CHIP_PALM) {
				if (dig->linkb)
					return 1;
				else
					return 0;
			} else
				/* llano follows DCE3.2 */
				return radeon_crtc->crtc_id;
		} else {
			/* each UNIPHY instance owns two DIG blocks;
			 * link B takes the odd-numbered one.
			 */
			switch (radeon_encoder->encoder_id) {
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
				if (dig->linkb)
					return 1;
				else
					return 0;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
				if (dig->linkb)
					return 3;
				else
					return 2;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
				if (dig->linkb)
					return 5;
				else
					return 4;
				break;
			}
		}
	}

	/* on DCE3.2 an encoder can drive any DIG block, so just use the crtc id */
	if (ASIC_IS_DCE32(rdev)) {
		return radeon_crtc->crtc_id;
	}

	/* on DCE3 - LVTMA can only be driven by DIGB; scan the other digital
	 * encoders to see which DIG blocks are already taken.
	 */
	list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
		struct radeon_encoder *radeon_test_encoder;

		if (encoder == test_encoder)
			continue;

		if (!radeon_encoder_is_digital(test_encoder))
			continue;

		radeon_test_encoder = to_radeon_encoder(test_encoder);
		dig = radeon_test_encoder->enc_priv;

		/* -1 means "no DIG block assigned yet" (see radeon_atombios_set_dig_info) */
		if (dig->dig_encoder >= 0)
			dig_enc_in_use |= (1 << dig->dig_encoder);
	}

	if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
		if (dig_enc_in_use & 0x2)
			DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
		return 1;
	}
	/* otherwise prefer DIG1 if free, else fall back to DIG2 */
	if (!(dig_enc_in_use & 1))
		return 0;
	return 1;
}
1765
/* This only needs to be called once at startup:
 * send the INIT action to every DIG transmitter and, on DCE4.1,
 * to any external encoder bridging the output.
 */
void
radeon_atom_encoder_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_encoder *encoder;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
		struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);

		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
			break;
		default:
			break;
		}

		if (ext_encoder && ASIC_IS_DCE41(rdev))
			atombios_external_encoder_setup(encoder, ext_encoder,
							EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
	}
}
1793
/*
 * Program the encoder (and any external bridge) for the given mode.
 * The enable/disable sequencing below is order-sensitive: on pre-DCE4
 * hardware the encoder must be disabled before being re-set-up, and
 * the transmitter is always brought up last.
 */
static void
radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);

	/* remember the pixel clock for later table calls */
	radeon_encoder->pixel_clock = adjusted_mode->clock;

	/* AVIVO (pre-DCE4) parts need explicit YUV setup for TV/CV outputs */
	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
		if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
			atombios_yuv_setup(encoder, true);
		else
			atombios_yuv_setup(encoder, false);
	}

	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		if (ASIC_IS_DCE4(rdev)) {
			/* disable the transmitter */
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
			/* setup and enable the encoder */
			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);

			/* enable the transmitter */
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
		} else {
			/* disable the encoder and transmitter */
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);

			/* setup and enable the encoder and transmitter */
			atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
		}
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		atombios_dvo_setup(encoder, ATOM_ENABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		atombios_dac_setup(encoder, ATOM_ENABLE);
		/* DACs that can also feed TV/CV get their TV encoder toggled
		 * according to whether a TV/CV device is currently active.
		 */
		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
				atombios_tv_setup(encoder, ATOM_ENABLE);
			else
				atombios_tv_setup(encoder, ATOM_DISABLE);
		}
		break;
	}

	/* external bridge (e.g. DP->LVDS): DCE4.1 uses the v3 action table */
	if (ext_encoder) {
		if (ASIC_IS_DCE41(rdev))
			atombios_external_encoder_setup(encoder, ext_encoder,
							EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
		else
			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
	}

	atombios_apply_encoder_quirks(encoder, adjusted_mode);

	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		r600_hdmi_enable(encoder);
		r600_hdmi_setmode(encoder, adjusted_mode);
	}
}
1877
/*
 * Trigger the ATOM DAC load-detection table for @connector.
 * Returns true if the table was executed (results land in the BIOS
 * scratch registers, read back by radeon_atom_dac_detect()); false if
 * the encoder has no analog devices or table parsing failed.
 */
static bool
atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);

	if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
				       ATOM_DEVICE_CV_SUPPORT |
				       ATOM_DEVICE_CRT_SUPPORT)) {
		DAC_LOAD_DETECTION_PS_ALLOCATION args;
		int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
		uint8_t frev, crev;

		memset(&args, 0, sizeof(args));

		/* need the table revision to know which fields are valid */
		if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
			return false;

		args.sDacload.ucMisc = 0;

		/* DAC A for the primary DACs, DAC B for everything else */
		if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
			args.sDacload.ucDacType = ATOM_DAC_A;
		else
			args.sDacload.ucDacType = ATOM_DAC_B;

		if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
		else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
		else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
			/* YPrPb flag only exists from table revision 3 on */
			if (crev >= 3)
				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
		} else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
			if (crev >= 3)
				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
		}

		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

		return true;
	} else
		return false;
}
1926
/*
 * Analog load detection: run the DAC load-detect table, then decode the
 * result from the BIOS 0 scratch register for whichever device classes
 * the connector supports.
 */
static enum drm_connector_status
radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	uint32_t bios_0_scratch;

	if (!atombios_dac_load_detect(encoder, connector)) {
		DRM_DEBUG_KMS("detect returned false \n");
		return connector_status_unknown;
	}

	/* scratch register moved on R600+ */
	if (rdev->family >= CHIP_R600)
		bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
	else
		bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);

	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
	if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
			return connector_status_connected;
	}
	if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
			return connector_status_connected;
	}
	if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
			return connector_status_connected;
	}
	if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
			return connector_status_connected; /* CTV */
		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
			return connector_status_connected; /* STV */
	}
	return connector_status_disconnected;
}
1967
/*
 * Load detection through an external DP bridge (DCE4+ only): ask the
 * bridge to run DAC load detection, then decode the BIOS 0 scratch
 * register the same way as radeon_atom_dac_detect().
 * Returns connector_status_unknown when the prerequisites (DCE4 ASIC,
 * external bridge, CRT-capable connector) are not met.
 */
static enum drm_connector_status
radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
	u32 bios_0_scratch;

	if (!ASIC_IS_DCE4(rdev))
		return connector_status_unknown;

	if (!ext_encoder)
		return connector_status_unknown;

	if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
		return connector_status_unknown;

	/* load detect on the dp bridge */
	atombios_external_encoder_setup(encoder, ext_encoder,
					EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);

	bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);

	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
	if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
			return connector_status_connected;
	}
	if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
			return connector_status_connected;
	}
	if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
			return connector_status_connected;
	}
	if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
			return connector_status_connected; /* CTV */
		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
			return connector_status_connected; /* STV */
	}
	return connector_status_disconnected;
}
2014
2015void
2016radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
2017{
2018 struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
2019
2020 if (ext_encoder)
2021 /* ddc_setup on the dp bridge */
2022 atombios_external_encoder_setup(encoder, ext_encoder,
2023 EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
2024
2025}
2026
/*
 * drm encoder helper .prepare hook: assign a DIG block where needed,
 * lock the output, power the encoder down for the mode set, and
 * pre-program the CRTC source routing.
 */
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);

	/* digital outputs (and DP-bridged ones) need a DIG block picked
	 * before the mode set programs it.
	 */
	if ((radeon_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
		if (dig)
			dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
	}

	radeon_atom_output_lock(encoder, true);
	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		/* select the clock/data port if it uses a router */
		if (radeon_connector->router.cd_valid)
			radeon_router_select_cd_port(radeon_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			atombios_set_edp_panel_power(connector,
						     ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	atombios_set_encoder_crtc_source(encoder);
}
2060
/* drm encoder helper .commit hook: power the encoder back on after the
 * mode set and release the output lock taken in .prepare.
 */
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
{
	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	radeon_atom_output_lock(encoder, false);
}
2066
/*
 * drm encoder helper .disable hook: fully power down the encoder,
 * release its DIG block, and clear its active devices.
 */
static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig;

	/* check for pre-DCE3 cards with shared encoders;
	 * can't really use the links individually, so don't disable
	 * the encoder if it's in use by another connector
	 */
	if (!ASIC_IS_DCE3(rdev)) {
		struct drm_encoder *other_encoder;
		struct radeon_encoder *other_radeon_encoder;

		list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
			other_radeon_encoder = to_radeon_encoder(other_encoder);
			if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
			    drm_helper_encoder_in_use(other_encoder))
				goto disable_done;
		}
	}

	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		if (ASIC_IS_DCE4(rdev))
			/* disable the transmitter */
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
		else {
			/* disable the encoder and transmitter */
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
		}
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		atombios_dvo_setup(encoder, ATOM_DISABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		atombios_dac_setup(encoder, ATOM_DISABLE);
		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
			atombios_tv_setup(encoder, ATOM_DISABLE);
		break;
	}

disable_done:
	if (radeon_encoder_is_digital(encoder)) {
		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			r600_hdmi_disable(encoder);
		/* NOTE(review): assumes digital encoders always have enc_priv
		 * allocated (see radeon_add_atom_encoder) — confirm before
		 * relying on it; no NULL check here.
		 */
		dig = radeon_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	radeon_encoder->active_device = 0;
}
2136
/* these are handled by the primary encoders */
/* external-encoder .prepare hook: intentionally empty */
static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
{

}
2142
/* external-encoder .commit hook: intentionally empty */
static void radeon_atom_ext_commit(struct drm_encoder *encoder)
{

}
2147
/* external-encoder .mode_set hook: intentionally empty */
static void
radeon_atom_ext_mode_set(struct drm_encoder *encoder,
			 struct drm_display_mode *mode,
			 struct drm_display_mode *adjusted_mode)
{

}
2155
/* external-encoder .disable hook: intentionally empty */
static void radeon_atom_ext_disable(struct drm_encoder *encoder)
{

}
2160
/* external-encoder .dpms hook: intentionally empty */
static void
radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
{

}
2166
/* external-encoder .mode_fixup hook: accept every mode unchanged */
static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *adjusted_mode)
{
	return true;
}
2173
/* helper vtable for external (bridge) encoders: all hooks are stubs
 * because the primary encoder drives the hardware.
 */
static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
	.dpms = radeon_atom_ext_dpms,
	.mode_fixup = radeon_atom_ext_mode_fixup,
	.prepare = radeon_atom_ext_prepare,
	.mode_set = radeon_atom_ext_mode_set,
	.commit = radeon_atom_ext_commit,
	.disable = radeon_atom_ext_disable,
	/* no detect for TMDS/LVDS yet */
};
2183
/* helper vtable for digital encoders (TMDS/LVDS/UNIPHY) */
static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
	.dpms = radeon_atom_encoder_dpms,
	.mode_fixup = radeon_atom_mode_fixup,
	.prepare = radeon_atom_encoder_prepare,
	.mode_set = radeon_atom_encoder_mode_set,
	.commit = radeon_atom_encoder_commit,
	.disable = radeon_atom_encoder_disable,
	.detect = radeon_atom_dig_detect,
};
2193
/* helper vtable for analog (DAC) encoders; note: no .disable hook here */
static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
	.dpms = radeon_atom_encoder_dpms,
	.mode_fixup = radeon_atom_mode_fixup,
	.prepare = radeon_atom_encoder_prepare,
	.mode_set = radeon_atom_encoder_mode_set,
	.commit = radeon_atom_encoder_commit,
	.detect = radeon_atom_dac_detect,
};
2202
/* drm encoder .destroy hook: free the ATOM private data, detach the
 * encoder from the core, then free the wrapper itself.
 */
void radeon_enc_destroy(struct drm_encoder *encoder)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	kfree(radeon_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(radeon_encoder);
}
2210
/* base encoder ops shared by every ATOM encoder type */
static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
	.destroy = radeon_enc_destroy,
};
2214
/*
 * Allocate the ATOM DAC private data for @radeon_encoder, seeded with
 * the TV standard from the BIOS tables.  Returns NULL on allocation
 * failure.
 */
struct radeon_encoder_atom_dac *
radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
{
	struct drm_device *dev = radeon_encoder->base.dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);

	if (!dac)
		return NULL;

	dac->tv_std = radeon_atombios_get_tv_info(rdev);
	return dac;
}
2228
2229struct radeon_encoder_atom_dig *
2230radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
2231{
2232 int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
2233 struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
2234
2235 if (!dig)
2236 return NULL;
2237
2238 /* coherent mode by default */
2239 dig->coherent_mode = true;
2240 dig->dig_encoder = -1;
2241
2242 if (encoder_enum == 2)
2243 dig->linkb = true;
2244 else
2245 dig->linkb = false;
2246
2247 return dig;
2248}
2249
/*
 * Register an ATOM BIOS encoder with the DRM core.  If an encoder with
 * the same enum already exists, just OR in the additional supported
 * devices; otherwise allocate a new radeon_encoder, pick its DRM
 * encoder type and private data from the object id, and attach the
 * matching helper vtable.
 */
void
radeon_add_atom_encoder(struct drm_device *dev,
			uint32_t encoder_enum,
			uint32_t supported_device,
			u16 caps)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_encoder *encoder;
	struct radeon_encoder *radeon_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		radeon_encoder = to_radeon_encoder(encoder);
		if (radeon_encoder->encoder_enum == encoder_enum) {
			radeon_encoder->devices |= supported_device;
			return;
		}

	}

	/* add a new one */
	radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
	if (!radeon_encoder)
		return;

	encoder = &radeon_encoder->base;
	/* possible_crtcs is a bitmask: one bit per CRTC on this ASIC */
	switch (rdev->num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	radeon_encoder->enc_priv = NULL;

	radeon_encoder->encoder_enum = encoder_enum;
	radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	radeon_encoder->devices = supported_device;
	radeon_encoder->rmx_type = RMX_OFF;
	radeon_encoder->underscan_type = UNDERSCAN_OFF;
	radeon_encoder->is_ext_encoder = false;
	radeon_encoder->caps = caps;

	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		/* panel outputs get full RMX scaling and LVDS panel info;
		 * otherwise treat as plain TMDS.
		 */
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			radeon_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
		} else {
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
		}
		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
		/* primary DAC: plain CRT */
		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		/* secondary DACs can also drive TV */
		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		/* multi-purpose digital encoders: pick the DRM type from the
		 * device classes they serve (LCD > CRT > TMDS).
		 */
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			radeon_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
		} else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
		} else {
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
		}
		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		radeon_encoder->is_ext_encoder = true;
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
		else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
		else
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
		drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
		break;
	}
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index ed406e8404a3..1d603a3335db 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -157,6 +157,57 @@ int sumo_get_temp(struct radeon_device *rdev)
157 return actual_temp * 1000; 157 return actual_temp * 1000;
158} 158}
159 159
/*
 * Populate the power-management profile table for Sumo-class APUs.
 * Low/mid profiles use the battery state on mobility parts and the
 * performance state otherwise; high profiles always use performance
 * with its fastest clock mode.
 */
void sumo_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	/* low,mid sh/mh */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);

	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

	/* high sh/mh: performance state, dpms-on uses its last clock mode */
	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;

	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
		rdev->pm.power_state[idx].num_clock_modes - 1;
}
210
160void evergreen_pm_misc(struct radeon_device *rdev) 211void evergreen_pm_misc(struct radeon_device *rdev)
161{ 212{
162 int req_ps_idx = rdev->pm.requested_power_state_index; 213 int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -353,6 +404,7 @@ void evergreen_hpd_init(struct radeon_device *rdev)
353 default: 404 default:
354 break; 405 break;
355 } 406 }
407 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
356 } 408 }
357 if (rdev->irq.installed) 409 if (rdev->irq.installed)
358 evergreen_irq_set(rdev); 410 evergreen_irq_set(rdev);
@@ -893,7 +945,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
893 u32 tmp; 945 u32 tmp;
894 int r; 946 int r;
895 947
896 if (rdev->gart.table.vram.robj == NULL) { 948 if (rdev->gart.robj == NULL) {
897 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 949 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
898 return -EINVAL; 950 return -EINVAL;
899 } 951 }
@@ -945,7 +997,6 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
945void evergreen_pcie_gart_disable(struct radeon_device *rdev) 997void evergreen_pcie_gart_disable(struct radeon_device *rdev)
946{ 998{
947 u32 tmp; 999 u32 tmp;
948 int r;
949 1000
950 /* Disable all tables */ 1001 /* Disable all tables */
951 WREG32(VM_CONTEXT0_CNTL, 0); 1002 WREG32(VM_CONTEXT0_CNTL, 0);
@@ -965,14 +1016,7 @@ void evergreen_pcie_gart_disable(struct radeon_device *rdev)
965 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); 1016 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
966 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 1017 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
967 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 1018 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
968 if (rdev->gart.table.vram.robj) { 1019 radeon_gart_table_vram_unpin(rdev);
969 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
970 if (likely(r == 0)) {
971 radeon_bo_kunmap(rdev->gart.table.vram.robj);
972 radeon_bo_unpin(rdev->gart.table.vram.robj);
973 radeon_bo_unreserve(rdev->gart.table.vram.robj);
974 }
975 }
976} 1020}
977 1021
978void evergreen_pcie_gart_fini(struct radeon_device *rdev) 1022void evergreen_pcie_gart_fini(struct radeon_device *rdev)
@@ -1226,7 +1270,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
1226 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 1270 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1227 rdev->mc.vram_end >> 12); 1271 rdev->mc.vram_end >> 12);
1228 } 1272 }
1229 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); 1273 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
1230 if (rdev->flags & RADEON_IS_IGP) { 1274 if (rdev->flags & RADEON_IS_IGP) {
1231 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; 1275 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
1232 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; 1276 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
@@ -3031,6 +3075,10 @@ static int evergreen_startup(struct radeon_device *rdev)
3031 } 3075 }
3032 } 3076 }
3033 3077
3078 r = r600_vram_scratch_init(rdev);
3079 if (r)
3080 return r;
3081
3034 evergreen_mc_program(rdev); 3082 evergreen_mc_program(rdev);
3035 if (rdev->flags & RADEON_IS_AGP) { 3083 if (rdev->flags & RADEON_IS_AGP) {
3036 evergreen_agp_enable(rdev); 3084 evergreen_agp_enable(rdev);
@@ -3235,6 +3283,7 @@ void evergreen_fini(struct radeon_device *rdev)
3235 radeon_ib_pool_fini(rdev); 3283 radeon_ib_pool_fini(rdev);
3236 radeon_irq_kms_fini(rdev); 3284 radeon_irq_kms_fini(rdev);
3237 evergreen_pcie_gart_fini(rdev); 3285 evergreen_pcie_gart_fini(rdev);
3286 r600_vram_scratch_fini(rdev);
3238 radeon_gem_fini(rdev); 3287 radeon_gem_fini(rdev);
3239 radeon_fence_driver_fini(rdev); 3288 radeon_fence_driver_fini(rdev);
3240 radeon_agp_fini(rdev); 3289 radeon_agp_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index dcf11bbc06d9..914e5af84163 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -94,6 +94,15 @@ cp_set_surface_sync(struct radeon_device *rdev,
94 else 94 else
95 cp_coher_size = ((size + 255) >> 8); 95 cp_coher_size = ((size + 255) >> 8);
96 96
97 if (rdev->family >= CHIP_CAYMAN) {
98 /* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
99 * to the RB directly. For IBs, the CP programs this as part of the
100 * surface_sync packet.
101 */
102 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
103 radeon_ring_write(rdev, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
104 radeon_ring_write(rdev, 0); /* CP_COHER_CNTL2 */
105 }
97 radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3)); 106 radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
98 radeon_ring_write(rdev, sync_type); 107 radeon_ring_write(rdev, sync_type);
99 radeon_ring_write(rdev, cp_coher_size); 108 radeon_ring_write(rdev, cp_coher_size);
@@ -174,7 +183,7 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
174static void 183static void
175set_tex_resource(struct radeon_device *rdev, 184set_tex_resource(struct radeon_device *rdev,
176 int format, int w, int h, int pitch, 185 int format, int w, int h, int pitch,
177 u64 gpu_addr) 186 u64 gpu_addr, u32 size)
178{ 187{
179 u32 sq_tex_resource_word0, sq_tex_resource_word1; 188 u32 sq_tex_resource_word0, sq_tex_resource_word1;
180 u32 sq_tex_resource_word4, sq_tex_resource_word7; 189 u32 sq_tex_resource_word4, sq_tex_resource_word7;
@@ -196,6 +205,9 @@ set_tex_resource(struct radeon_device *rdev,
196 sq_tex_resource_word7 = format | 205 sq_tex_resource_word7 = format |
197 S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE); 206 S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
198 207
208 cp_set_surface_sync(rdev,
209 PACKET3_TC_ACTION_ENA, size, gpu_addr);
210
199 radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); 211 radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
200 radeon_ring_write(rdev, 0); 212 radeon_ring_write(rdev, 0);
201 radeon_ring_write(rdev, sq_tex_resource_word0); 213 radeon_ring_write(rdev, sq_tex_resource_word0);
@@ -613,11 +625,13 @@ int evergreen_blit_init(struct radeon_device *rdev)
613 rdev->r600_blit.primitives.set_default_state = set_default_state; 625 rdev->r600_blit.primitives.set_default_state = set_default_state;
614 626
615 rdev->r600_blit.ring_size_common = 55; /* shaders + def state */ 627 rdev->r600_blit.ring_size_common = 55; /* shaders + def state */
616 rdev->r600_blit.ring_size_common += 10; /* fence emit for VB IB */ 628 rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
617 rdev->r600_blit.ring_size_common += 5; /* done copy */ 629 rdev->r600_blit.ring_size_common += 5; /* done copy */
618 rdev->r600_blit.ring_size_common += 10; /* fence emit for done copy */ 630 rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
619 631
620 rdev->r600_blit.ring_size_per_loop = 74; 632 rdev->r600_blit.ring_size_per_loop = 74;
633 if (rdev->family >= CHIP_CAYMAN)
634 rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
621 635
622 rdev->r600_blit.max_dim = 16384; 636 rdev->r600_blit.max_dim = 16384;
623 637
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index fdb93f884575..0e5799857465 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -262,8 +262,11 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
262 WREG32(MC_SEQ_SUP_CNTL, 0x00000001); 262 WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
263 263
264 /* wait for training to complete */ 264 /* wait for training to complete */
265 while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)) 265 for (i = 0; i < rdev->usec_timeout; i++) {
266 udelay(10); 266 if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
267 break;
268 udelay(1);
269 }
267 270
268 if (running) 271 if (running)
269 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); 272 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
@@ -933,7 +936,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
933{ 936{
934 int r; 937 int r;
935 938
936 if (rdev->gart.table.vram.robj == NULL) { 939 if (rdev->gart.robj == NULL) {
937 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 940 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
938 return -EINVAL; 941 return -EINVAL;
939 } 942 }
@@ -978,8 +981,6 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
978 981
979void cayman_pcie_gart_disable(struct radeon_device *rdev) 982void cayman_pcie_gart_disable(struct radeon_device *rdev)
980{ 983{
981 int r;
982
983 /* Disable all tables */ 984 /* Disable all tables */
984 WREG32(VM_CONTEXT0_CNTL, 0); 985 WREG32(VM_CONTEXT0_CNTL, 0);
985 WREG32(VM_CONTEXT1_CNTL, 0); 986 WREG32(VM_CONTEXT1_CNTL, 0);
@@ -995,14 +996,7 @@ void cayman_pcie_gart_disable(struct radeon_device *rdev)
995 WREG32(VM_L2_CNTL2, 0); 996 WREG32(VM_L2_CNTL2, 0);
996 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | 997 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
997 L2_CACHE_BIGK_FRAGMENT_SIZE(6)); 998 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
998 if (rdev->gart.table.vram.robj) { 999 radeon_gart_table_vram_unpin(rdev);
999 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
1000 if (likely(r == 0)) {
1001 radeon_bo_kunmap(rdev->gart.table.vram.robj);
1002 radeon_bo_unpin(rdev->gart.table.vram.robj);
1003 radeon_bo_unreserve(rdev->gart.table.vram.robj);
1004 }
1005 }
1006} 1000}
1007 1001
1008void cayman_pcie_gart_fini(struct radeon_device *rdev) 1002void cayman_pcie_gart_fini(struct radeon_device *rdev)
@@ -1362,6 +1356,10 @@ static int cayman_startup(struct radeon_device *rdev)
1362 return r; 1356 return r;
1363 } 1357 }
1364 1358
1359 r = r600_vram_scratch_init(rdev);
1360 if (r)
1361 return r;
1362
1365 evergreen_mc_program(rdev); 1363 evergreen_mc_program(rdev);
1366 r = cayman_pcie_gart_enable(rdev); 1364 r = cayman_pcie_gart_enable(rdev);
1367 if (r) 1365 if (r)
@@ -1557,6 +1555,7 @@ void cayman_fini(struct radeon_device *rdev)
1557 radeon_ib_pool_fini(rdev); 1555 radeon_ib_pool_fini(rdev);
1558 radeon_irq_kms_fini(rdev); 1556 radeon_irq_kms_fini(rdev);
1559 cayman_pcie_gart_fini(rdev); 1557 cayman_pcie_gart_fini(rdev);
1558 r600_vram_scratch_fini(rdev);
1560 radeon_gem_fini(rdev); 1559 radeon_gem_fini(rdev);
1561 radeon_fence_driver_fini(rdev); 1560 radeon_fence_driver_fini(rdev);
1562 radeon_bo_fini(rdev); 1561 radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cbf49f4f408e..ad158ea49901 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -537,6 +537,7 @@ void r100_hpd_init(struct radeon_device *rdev)
537 default: 537 default:
538 break; 538 break;
539 } 539 }
540 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
540 } 541 }
541 if (rdev->irq.installed) 542 if (rdev->irq.installed)
542 r100_irq_set(rdev); 543 r100_irq_set(rdev);
@@ -577,7 +578,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
577{ 578{
578 int r; 579 int r;
579 580
580 if (rdev->gart.table.ram.ptr) { 581 if (rdev->gart.ptr) {
581 WARN(1, "R100 PCI GART already initialized\n"); 582 WARN(1, "R100 PCI GART already initialized\n");
582 return 0; 583 return 0;
583 } 584 }
@@ -636,10 +637,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
636 637
637int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 638int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
638{ 639{
640 u32 *gtt = rdev->gart.ptr;
641
639 if (i < 0 || i > rdev->gart.num_gpu_pages) { 642 if (i < 0 || i > rdev->gart.num_gpu_pages) {
640 return -EINVAL; 643 return -EINVAL;
641 } 644 }
642 rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr)); 645 gtt[i] = cpu_to_le32(lower_32_bits(addr));
643 return 0; 646 return 0;
644} 647}
645 648
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 33f2b68c680b..400b26df652a 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -74,7 +74,7 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
74 74
75int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 75int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
76{ 76{
77 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; 77 void __iomem *ptr = rdev->gart.ptr;
78 78
79 if (i < 0 || i > rdev->gart.num_gpu_pages) { 79 if (i < 0 || i > rdev->gart.num_gpu_pages) {
80 return -EINVAL; 80 return -EINVAL;
@@ -93,7 +93,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
93{ 93{
94 int r; 94 int r;
95 95
96 if (rdev->gart.table.vram.robj) { 96 if (rdev->gart.robj) {
97 WARN(1, "RV370 PCIE GART already initialized\n"); 97 WARN(1, "RV370 PCIE GART already initialized\n");
98 return 0; 98 return 0;
99 } 99 }
@@ -116,7 +116,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
116 uint32_t tmp; 116 uint32_t tmp;
117 int r; 117 int r;
118 118
119 if (rdev->gart.table.vram.robj == NULL) { 119 if (rdev->gart.robj == NULL) {
120 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 120 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
121 return -EINVAL; 121 return -EINVAL;
122 } 122 }
@@ -154,7 +154,6 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
154void rv370_pcie_gart_disable(struct radeon_device *rdev) 154void rv370_pcie_gart_disable(struct radeon_device *rdev)
155{ 155{
156 u32 tmp; 156 u32 tmp;
157 int r;
158 157
159 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0); 158 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
160 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0); 159 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
@@ -163,14 +162,7 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
163 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); 162 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
164 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; 163 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
165 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); 164 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
166 if (rdev->gart.table.vram.robj) { 165 radeon_gart_table_vram_unpin(rdev);
167 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
168 if (likely(r == 0)) {
169 radeon_bo_kunmap(rdev->gart.table.vram.robj);
170 radeon_bo_unpin(rdev->gart.table.vram.robj);
171 radeon_bo_unreserve(rdev->gart.table.vram.robj);
172 }
173 }
174} 166}
175 167
176void rv370_pcie_gart_fini(struct radeon_device *rdev) 168void rv370_pcie_gart_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 4e777c1e4b7b..9cdda0b3b081 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -288,24 +288,6 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev)
288 pcie_lanes); 288 pcie_lanes);
289} 289}
290 290
291static int r600_pm_get_type_index(struct radeon_device *rdev,
292 enum radeon_pm_state_type ps_type,
293 int instance)
294{
295 int i;
296 int found_instance = -1;
297
298 for (i = 0; i < rdev->pm.num_power_states; i++) {
299 if (rdev->pm.power_state[i].type == ps_type) {
300 found_instance++;
301 if (found_instance == instance)
302 return i;
303 }
304 }
305 /* return default if no match */
306 return rdev->pm.default_power_state_index;
307}
308
309void rs780_pm_init_profile(struct radeon_device *rdev) 291void rs780_pm_init_profile(struct radeon_device *rdev)
310{ 292{
311 if (rdev->pm.num_power_states == 2) { 293 if (rdev->pm.num_power_states == 2) {
@@ -421,6 +403,8 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
421 403
422void r600_pm_init_profile(struct radeon_device *rdev) 404void r600_pm_init_profile(struct radeon_device *rdev)
423{ 405{
406 int idx;
407
424 if (rdev->family == CHIP_R600) { 408 if (rdev->family == CHIP_R600) {
425 /* XXX */ 409 /* XXX */
426 /* default */ 410 /* default */
@@ -502,81 +486,43 @@ void r600_pm_init_profile(struct radeon_device *rdev)
502 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 486 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
503 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; 487 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
504 /* low sh */ 488 /* low sh */
505 if (rdev->flags & RADEON_IS_MOBILITY) { 489 if (rdev->flags & RADEON_IS_MOBILITY)
506 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 490 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
507 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 491 else
508 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 492 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
509 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 493 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
510 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 494 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
511 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 495 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
512 } else { 496 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
513 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
514 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
515 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
516 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
517 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
518 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
519 }
520 /* mid sh */ 497 /* mid sh */
521 if (rdev->flags & RADEON_IS_MOBILITY) { 498 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
522 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 499 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
523 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 500 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
524 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 501 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
525 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
526 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
527 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
528 } else {
529 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
530 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
531 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
532 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
533 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
534 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
535 }
536 /* high sh */ 502 /* high sh */
537 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 503 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
538 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); 504 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
539 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 505 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
540 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
541 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 506 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
542 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; 507 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
543 /* low mh */ 508 /* low mh */
544 if (rdev->flags & RADEON_IS_MOBILITY) { 509 if (rdev->flags & RADEON_IS_MOBILITY)
545 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 510 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
546 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 511 else
547 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 512 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
548 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 513 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
549 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 514 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
550 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 515 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
551 } else { 516 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
552 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
553 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
554 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
555 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
556 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
557 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
558 }
559 /* mid mh */ 517 /* mid mh */
560 if (rdev->flags & RADEON_IS_MOBILITY) { 518 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
561 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 519 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
562 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 520 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
563 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 521 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
564 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
565 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
566 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
567 } else {
568 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
569 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
570 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
571 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
572 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
573 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
574 }
575 /* high mh */ 522 /* high mh */
576 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 523 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
577 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); 524 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
578 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 525 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
579 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
580 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 526 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
581 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; 527 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
582 } 528 }
@@ -763,13 +709,14 @@ void r600_hpd_init(struct radeon_device *rdev)
763 struct drm_device *dev = rdev->ddev; 709 struct drm_device *dev = rdev->ddev;
764 struct drm_connector *connector; 710 struct drm_connector *connector;
765 711
766 if (ASIC_IS_DCE3(rdev)) { 712 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
767 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa); 713 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
768 if (ASIC_IS_DCE32(rdev)) 714
769 tmp |= DC_HPDx_EN; 715 if (ASIC_IS_DCE3(rdev)) {
716 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
717 if (ASIC_IS_DCE32(rdev))
718 tmp |= DC_HPDx_EN;
770 719
771 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
772 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
773 switch (radeon_connector->hpd.hpd) { 720 switch (radeon_connector->hpd.hpd) {
774 case RADEON_HPD_1: 721 case RADEON_HPD_1:
775 WREG32(DC_HPD1_CONTROL, tmp); 722 WREG32(DC_HPD1_CONTROL, tmp);
@@ -799,10 +746,7 @@ void r600_hpd_init(struct radeon_device *rdev)
799 default: 746 default:
800 break; 747 break;
801 } 748 }
802 } 749 } else {
803 } else {
804 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
805 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
806 switch (radeon_connector->hpd.hpd) { 750 switch (radeon_connector->hpd.hpd) {
807 case RADEON_HPD_1: 751 case RADEON_HPD_1:
808 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN); 752 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
@@ -820,6 +764,7 @@ void r600_hpd_init(struct radeon_device *rdev)
820 break; 764 break;
821 } 765 }
822 } 766 }
767 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
823 } 768 }
824 if (rdev->irq.installed) 769 if (rdev->irq.installed)
825 r600_irq_set(rdev); 770 r600_irq_set(rdev);
@@ -897,7 +842,7 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
897 /* flush hdp cache so updates hit vram */ 842 /* flush hdp cache so updates hit vram */
898 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && 843 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
899 !(rdev->flags & RADEON_IS_AGP)) { 844 !(rdev->flags & RADEON_IS_AGP)) {
900 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; 845 void __iomem *ptr = (void *)rdev->gart.ptr;
901 u32 tmp; 846 u32 tmp;
902 847
903 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 848 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
@@ -932,7 +877,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
932{ 877{
933 int r; 878 int r;
934 879
935 if (rdev->gart.table.vram.robj) { 880 if (rdev->gart.robj) {
936 WARN(1, "R600 PCIE GART already initialized\n"); 881 WARN(1, "R600 PCIE GART already initialized\n");
937 return 0; 882 return 0;
938 } 883 }
@@ -949,7 +894,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
949 u32 tmp; 894 u32 tmp;
950 int r, i; 895 int r, i;
951 896
952 if (rdev->gart.table.vram.robj == NULL) { 897 if (rdev->gart.robj == NULL) {
953 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 898 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
954 return -EINVAL; 899 return -EINVAL;
955 } 900 }
@@ -1004,7 +949,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
1004void r600_pcie_gart_disable(struct radeon_device *rdev) 949void r600_pcie_gart_disable(struct radeon_device *rdev)
1005{ 950{
1006 u32 tmp; 951 u32 tmp;
1007 int i, r; 952 int i;
1008 953
1009 /* Disable all tables */ 954 /* Disable all tables */
1010 for (i = 0; i < 7; i++) 955 for (i = 0; i < 7; i++)
@@ -1031,14 +976,7 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
1031 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); 976 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1032 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); 977 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
1033 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); 978 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1034 if (rdev->gart.table.vram.robj) { 979 radeon_gart_table_vram_unpin(rdev);
1035 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
1036 if (likely(r == 0)) {
1037 radeon_bo_kunmap(rdev->gart.table.vram.robj);
1038 radeon_bo_unpin(rdev->gart.table.vram.robj);
1039 radeon_bo_unreserve(rdev->gart.table.vram.robj);
1040 }
1041 }
1042} 980}
1043 981
1044void r600_pcie_gart_fini(struct radeon_device *rdev) 982void r600_pcie_gart_fini(struct radeon_device *rdev)
@@ -1138,7 +1076,7 @@ static void r600_mc_program(struct radeon_device *rdev)
1138 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); 1076 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
1139 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); 1077 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
1140 } 1078 }
1141 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); 1079 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
1142 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; 1080 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1143 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); 1081 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1144 WREG32(MC_VM_FB_LOCATION, tmp); 1082 WREG32(MC_VM_FB_LOCATION, tmp);
@@ -1277,6 +1215,53 @@ int r600_mc_init(struct radeon_device *rdev)
1277 return 0; 1215 return 0;
1278} 1216}
1279 1217
1218int r600_vram_scratch_init(struct radeon_device *rdev)
1219{
1220 int r;
1221
1222 if (rdev->vram_scratch.robj == NULL) {
1223 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
1224 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
1225 &rdev->vram_scratch.robj);
1226 if (r) {
1227 return r;
1228 }
1229 }
1230
1231 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1232 if (unlikely(r != 0))
1233 return r;
1234 r = radeon_bo_pin(rdev->vram_scratch.robj,
1235 RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
1236 if (r) {
1237 radeon_bo_unreserve(rdev->vram_scratch.robj);
1238 return r;
1239 }
1240 r = radeon_bo_kmap(rdev->vram_scratch.robj,
1241 (void **)&rdev->vram_scratch.ptr);
1242 if (r)
1243 radeon_bo_unpin(rdev->vram_scratch.robj);
1244 radeon_bo_unreserve(rdev->vram_scratch.robj);
1245
1246 return r;
1247}
1248
1249void r600_vram_scratch_fini(struct radeon_device *rdev)
1250{
1251 int r;
1252
1253 if (rdev->vram_scratch.robj == NULL) {
1254 return;
1255 }
1256 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1257 if (likely(r == 0)) {
1258 radeon_bo_kunmap(rdev->vram_scratch.robj);
1259 radeon_bo_unpin(rdev->vram_scratch.robj);
1260 radeon_bo_unreserve(rdev->vram_scratch.robj);
1261 }
1262 radeon_bo_unref(&rdev->vram_scratch.robj);
1263}
1264
1280/* We doesn't check that the GPU really needs a reset we simply do the 1265/* We doesn't check that the GPU really needs a reset we simply do the
1281 * reset, it's up to the caller to determine if the GPU needs one. We 1266 * reset, it's up to the caller to determine if the GPU needs one. We
1282 * might add an helper function to check that. 1267 * might add an helper function to check that.
@@ -2332,6 +2317,14 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
2332 if (rdev->wb.use_event) { 2317 if (rdev->wb.use_event) {
2333 u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET + 2318 u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
2334 (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base); 2319 (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
2320 /* flush read cache over gart */
2321 radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
2322 radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
2323 PACKET3_VC_ACTION_ENA |
2324 PACKET3_SH_ACTION_ENA);
2325 radeon_ring_write(rdev, 0xFFFFFFFF);
2326 radeon_ring_write(rdev, 0);
2327 radeon_ring_write(rdev, 10); /* poll interval */
2335 /* EVENT_WRITE_EOP - flush caches, send int */ 2328 /* EVENT_WRITE_EOP - flush caches, send int */
2336 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 2329 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2337 radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); 2330 radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
@@ -2340,6 +2333,14 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
2340 radeon_ring_write(rdev, fence->seq); 2333 radeon_ring_write(rdev, fence->seq);
2341 radeon_ring_write(rdev, 0); 2334 radeon_ring_write(rdev, 0);
2342 } else { 2335 } else {
2336 /* flush read cache over gart */
2337 radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
2338 radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
2339 PACKET3_VC_ACTION_ENA |
2340 PACKET3_SH_ACTION_ENA);
2341 radeon_ring_write(rdev, 0xFFFFFFFF);
2342 radeon_ring_write(rdev, 0);
2343 radeon_ring_write(rdev, 10); /* poll interval */
2343 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); 2344 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
2344 radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); 2345 radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2345 /* wait for 3D idle clean */ 2346 /* wait for 3D idle clean */
@@ -2421,6 +2422,10 @@ int r600_startup(struct radeon_device *rdev)
2421 } 2422 }
2422 } 2423 }
2423 2424
2425 r = r600_vram_scratch_init(rdev);
2426 if (r)
2427 return r;
2428
2424 r600_mc_program(rdev); 2429 r600_mc_program(rdev);
2425 if (rdev->flags & RADEON_IS_AGP) { 2430 if (rdev->flags & RADEON_IS_AGP) {
2426 r600_agp_enable(rdev); 2431 r600_agp_enable(rdev);
@@ -2641,6 +2646,7 @@ void r600_fini(struct radeon_device *rdev)
2641 radeon_ib_pool_fini(rdev); 2646 radeon_ib_pool_fini(rdev);
2642 radeon_irq_kms_fini(rdev); 2647 radeon_irq_kms_fini(rdev);
2643 r600_pcie_gart_fini(rdev); 2648 r600_pcie_gart_fini(rdev);
2649 r600_vram_scratch_fini(rdev);
2644 radeon_agp_fini(rdev); 2650 radeon_agp_fini(rdev);
2645 radeon_gem_fini(rdev); 2651 radeon_gem_fini(rdev);
2646 radeon_fence_driver_fini(rdev); 2652 radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index c4cf1308d4a1..e09d2818f949 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -201,7 +201,7 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
201static void 201static void
202set_tex_resource(struct radeon_device *rdev, 202set_tex_resource(struct radeon_device *rdev,
203 int format, int w, int h, int pitch, 203 int format, int w, int h, int pitch,
204 u64 gpu_addr) 204 u64 gpu_addr, u32 size)
205{ 205{
206 uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4; 206 uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
207 207
@@ -222,6 +222,9 @@ set_tex_resource(struct radeon_device *rdev,
222 S_038010_DST_SEL_Z(SQ_SEL_Z) | 222 S_038010_DST_SEL_Z(SQ_SEL_Z) |
223 S_038010_DST_SEL_W(SQ_SEL_W); 223 S_038010_DST_SEL_W(SQ_SEL_W);
224 224
225 cp_set_surface_sync(rdev,
226 PACKET3_TC_ACTION_ENA, size, gpu_addr);
227
225 radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); 228 radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
226 radeon_ring_write(rdev, 0); 229 radeon_ring_write(rdev, 0);
227 radeon_ring_write(rdev, sq_tex_resource_word0); 230 radeon_ring_write(rdev, sq_tex_resource_word0);
@@ -500,9 +503,9 @@ int r600_blit_init(struct radeon_device *rdev)
500 rdev->r600_blit.primitives.set_default_state = set_default_state; 503 rdev->r600_blit.primitives.set_default_state = set_default_state;
501 504
502 rdev->r600_blit.ring_size_common = 40; /* shaders + def state */ 505 rdev->r600_blit.ring_size_common = 40; /* shaders + def state */
503 rdev->r600_blit.ring_size_common += 10; /* fence emit for VB IB */ 506 rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
504 rdev->r600_blit.ring_size_common += 5; /* done copy */ 507 rdev->r600_blit.ring_size_common += 5; /* done copy */
505 rdev->r600_blit.ring_size_common += 10; /* fence emit for done copy */ 508 rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
506 509
507 rdev->r600_blit.ring_size_per_loop = 76; 510 rdev->r600_blit.ring_size_per_loop = 76;
508 /* set_render_target emits 2 extra dwords on rv6xx */ 511 /* set_render_target emits 2 extra dwords on rv6xx */
@@ -760,10 +763,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
760 vb[11] = i2f(h); 763 vb[11] = i2f(h);
761 764
762 rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8, 765 rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
763 w, h, w, src_gpu_addr); 766 w, h, w, src_gpu_addr, size_in_bytes);
764 rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
765 PACKET3_TC_ACTION_ENA,
766 size_in_bytes, src_gpu_addr);
767 rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8, 767 rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
768 w, h, dst_gpu_addr); 768 w, h, dst_gpu_addr);
769 rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h); 769 rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index e3170c794c1d..fc5a1d642cb5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -93,6 +93,7 @@ extern int radeon_audio;
93extern int radeon_disp_priority; 93extern int radeon_disp_priority;
94extern int radeon_hw_i2c; 94extern int radeon_hw_i2c;
95extern int radeon_pcie_gen2; 95extern int radeon_pcie_gen2;
96extern int radeon_msi;
96 97
97/* 98/*
98 * Copy from radeon_drv.h so we don't have to include both and have conflicting 99 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -306,30 +307,17 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
306 */ 307 */
307struct radeon_mc; 308struct radeon_mc;
308 309
309struct radeon_gart_table_ram {
310 volatile uint32_t *ptr;
311};
312
313struct radeon_gart_table_vram {
314 struct radeon_bo *robj;
315 volatile uint32_t *ptr;
316};
317
318union radeon_gart_table {
319 struct radeon_gart_table_ram ram;
320 struct radeon_gart_table_vram vram;
321};
322
323#define RADEON_GPU_PAGE_SIZE 4096 310#define RADEON_GPU_PAGE_SIZE 4096
324#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) 311#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
325#define RADEON_GPU_PAGE_SHIFT 12 312#define RADEON_GPU_PAGE_SHIFT 12
326 313
327struct radeon_gart { 314struct radeon_gart {
328 dma_addr_t table_addr; 315 dma_addr_t table_addr;
316 struct radeon_bo *robj;
317 void *ptr;
329 unsigned num_gpu_pages; 318 unsigned num_gpu_pages;
330 unsigned num_cpu_pages; 319 unsigned num_cpu_pages;
331 unsigned table_size; 320 unsigned table_size;
332 union radeon_gart_table table;
333 struct page **pages; 321 struct page **pages;
334 dma_addr_t *pages_addr; 322 dma_addr_t *pages_addr;
335 bool *ttm_alloced; 323 bool *ttm_alloced;
@@ -340,6 +328,8 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
340void radeon_gart_table_ram_free(struct radeon_device *rdev); 328void radeon_gart_table_ram_free(struct radeon_device *rdev);
341int radeon_gart_table_vram_alloc(struct radeon_device *rdev); 329int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
342void radeon_gart_table_vram_free(struct radeon_device *rdev); 330void radeon_gart_table_vram_free(struct radeon_device *rdev);
331int radeon_gart_table_vram_pin(struct radeon_device *rdev);
332void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
343int radeon_gart_init(struct radeon_device *rdev); 333int radeon_gart_init(struct radeon_device *rdev);
344void radeon_gart_fini(struct radeon_device *rdev); 334void radeon_gart_fini(struct radeon_device *rdev);
345void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, 335void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
@@ -347,6 +337,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
347int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, 337int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
348 int pages, struct page **pagelist, 338 int pages, struct page **pagelist,
349 dma_addr_t *dma_addr); 339 dma_addr_t *dma_addr);
340void radeon_gart_restore(struct radeon_device *rdev);
350 341
351 342
352/* 343/*
@@ -437,25 +428,26 @@ union radeon_irq_stat_regs {
437 struct evergreen_irq_stat_regs evergreen; 428 struct evergreen_irq_stat_regs evergreen;
438}; 429};
439 430
431#define RADEON_MAX_HPD_PINS 6
432#define RADEON_MAX_CRTCS 6
433#define RADEON_MAX_HDMI_BLOCKS 2
434
440struct radeon_irq { 435struct radeon_irq {
441 bool installed; 436 bool installed;
442 bool sw_int; 437 bool sw_int;
443 /* FIXME: use a define max crtc rather than hardcode it */ 438 bool crtc_vblank_int[RADEON_MAX_CRTCS];
444 bool crtc_vblank_int[6]; 439 bool pflip[RADEON_MAX_CRTCS];
445 bool pflip[6];
446 wait_queue_head_t vblank_queue; 440 wait_queue_head_t vblank_queue;
447 /* FIXME: use defines for max hpd/dacs */ 441 bool hpd[RADEON_MAX_HPD_PINS];
448 bool hpd[6];
449 bool gui_idle; 442 bool gui_idle;
450 bool gui_idle_acked; 443 bool gui_idle_acked;
451 wait_queue_head_t idle_queue; 444 wait_queue_head_t idle_queue;
452 /* FIXME: use defines for max HDMI blocks */ 445 bool hdmi[RADEON_MAX_HDMI_BLOCKS];
453 bool hdmi[2];
454 spinlock_t sw_lock; 446 spinlock_t sw_lock;
455 int sw_refcount; 447 int sw_refcount;
456 union radeon_irq_stat_regs stat_regs; 448 union radeon_irq_stat_regs stat_regs;
457 spinlock_t pflip_lock[6]; 449 spinlock_t pflip_lock[RADEON_MAX_CRTCS];
458 int pflip_refcount[6]; 450 int pflip_refcount[RADEON_MAX_CRTCS];
459}; 451};
460 452
461int radeon_irq_kms_init(struct radeon_device *rdev); 453int radeon_irq_kms_init(struct radeon_device *rdev);
@@ -533,7 +525,7 @@ struct r600_blit_cp_primitives {
533 void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr); 525 void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
534 void (*set_tex_resource)(struct radeon_device *rdev, 526 void (*set_tex_resource)(struct radeon_device *rdev,
535 int format, int w, int h, int pitch, 527 int format, int w, int h, int pitch,
536 u64 gpu_addr); 528 u64 gpu_addr, u32 size);
537 void (*set_scissors)(struct radeon_device *rdev, int x1, int y1, 529 void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
538 int x2, int y2); 530 int x2, int y2);
539 void (*draw_auto)(struct radeon_device *rdev); 531 void (*draw_auto)(struct radeon_device *rdev);
@@ -792,8 +784,7 @@ struct radeon_pm_clock_info {
792 784
793struct radeon_power_state { 785struct radeon_power_state {
794 enum radeon_pm_state_type type; 786 enum radeon_pm_state_type type;
795 /* XXX: use a define for num clock modes */ 787 struct radeon_pm_clock_info *clock_info;
796 struct radeon_pm_clock_info clock_info[8];
797 /* number of valid clock modes in this power state */ 788 /* number of valid clock modes in this power state */
798 int num_clock_modes; 789 int num_clock_modes;
799 struct radeon_pm_clock_info *default_clock_mode; 790 struct radeon_pm_clock_info *default_clock_mode;
@@ -863,6 +854,9 @@ struct radeon_pm {
863 struct device *int_hwmon_dev; 854 struct device *int_hwmon_dev;
864}; 855};
865 856
857int radeon_pm_get_type_index(struct radeon_device *rdev,
858 enum radeon_pm_state_type ps_type,
859 int instance);
866 860
867/* 861/*
868 * Benchmarking 862 * Benchmarking
@@ -1143,12 +1137,55 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
1143int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, 1137int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
1144 struct drm_file *filp); 1138 struct drm_file *filp);
1145 1139
1146/* VRAM scratch page for HDP bug */ 1140/* VRAM scratch page for HDP bug, default vram page */
1147struct r700_vram_scratch { 1141struct r600_vram_scratch {
1148 struct radeon_bo *robj; 1142 struct radeon_bo *robj;
1149 volatile uint32_t *ptr; 1143 volatile uint32_t *ptr;
1144 u64 gpu_addr;
1150}; 1145};
1151 1146
1147
1148/*
1149 * Mutex which allows recursive locking from the same process.
1150 */
1151struct radeon_mutex {
1152 struct mutex mutex;
1153 struct task_struct *owner;
1154 int level;
1155};
1156
1157static inline void radeon_mutex_init(struct radeon_mutex *mutex)
1158{
1159 mutex_init(&mutex->mutex);
1160 mutex->owner = NULL;
1161 mutex->level = 0;
1162}
1163
1164static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
1165{
1166 if (mutex_trylock(&mutex->mutex)) {
1167 /* The mutex was unlocked before, so it's ours now */
1168 mutex->owner = current;
1169 } else if (mutex->owner != current) {
1170 /* Another process locked the mutex, take it */
1171 mutex_lock(&mutex->mutex);
1172 mutex->owner = current;
1173 }
1174 /* Otherwise the mutex was already locked by this process */
1175
1176 mutex->level++;
1177}
1178
1179static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
1180{
1181 if (--mutex->level > 0)
1182 return;
1183
1184 mutex->owner = NULL;
1185 mutex_unlock(&mutex->mutex);
1186}
1187
1188
1152/* 1189/*
1153 * Core structure, functions and helpers. 1190 * Core structure, functions and helpers.
1154 */ 1191 */
@@ -1204,7 +1241,7 @@ struct radeon_device {
1204 struct radeon_gem gem; 1241 struct radeon_gem gem;
1205 struct radeon_pm pm; 1242 struct radeon_pm pm;
1206 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; 1243 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
1207 struct mutex cs_mutex; 1244 struct radeon_mutex cs_mutex;
1208 struct radeon_wb wb; 1245 struct radeon_wb wb;
1209 struct radeon_dummy_page dummy_page; 1246 struct radeon_dummy_page dummy_page;
1210 bool gpu_lockup; 1247 bool gpu_lockup;
@@ -1218,7 +1255,7 @@ struct radeon_device {
1218 const struct firmware *rlc_fw; /* r6/700 RLC firmware */ 1255 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
1219 const struct firmware *mc_fw; /* NI MC firmware */ 1256 const struct firmware *mc_fw; /* NI MC firmware */
1220 struct r600_blit r600_blit; 1257 struct r600_blit r600_blit;
1221 struct r700_vram_scratch vram_scratch; 1258 struct r600_vram_scratch vram_scratch;
1222 int msi_enabled; /* msi enabled */ 1259 int msi_enabled; /* msi enabled */
1223 struct r600_ih ih; /* r6/700 interrupt ring */ 1260 struct r600_ih ih; /* r6/700 interrupt ring */
1224 struct work_struct hotplug_work; 1261 struct work_struct hotplug_work;
@@ -1442,8 +1479,6 @@ void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
1442/* AGP */ 1479/* AGP */
1443extern int radeon_gpu_reset(struct radeon_device *rdev); 1480extern int radeon_gpu_reset(struct radeon_device *rdev);
1444extern void radeon_agp_disable(struct radeon_device *rdev); 1481extern void radeon_agp_disable(struct radeon_device *rdev);
1445extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
1446extern void radeon_gart_restore(struct radeon_device *rdev);
1447extern int radeon_modeset_init(struct radeon_device *rdev); 1482extern int radeon_modeset_init(struct radeon_device *rdev);
1448extern void radeon_modeset_fini(struct radeon_device *rdev); 1483extern void radeon_modeset_fini(struct radeon_device *rdev);
1449extern bool radeon_card_posted(struct radeon_device *rdev); 1484extern bool radeon_card_posted(struct radeon_device *rdev);
@@ -1467,6 +1502,12 @@ extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1467extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); 1502extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
1468 1503
1469/* 1504/*
1505 * R600 vram scratch functions
1506 */
1507int r600_vram_scratch_init(struct radeon_device *rdev);
1508void r600_vram_scratch_fini(struct radeon_device *rdev);
1509
1510/*
1470 * r600 functions used by radeon_encoder.c 1511 * r600 functions used by radeon_encoder.c
1471 */ 1512 */
1472extern void r600_hdmi_enable(struct drm_encoder *encoder); 1513extern void r600_hdmi_enable(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e2944566ffea..a2e1eae114ef 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -834,7 +834,7 @@ static struct radeon_asic sumo_asic = {
834 .pm_misc = &evergreen_pm_misc, 834 .pm_misc = &evergreen_pm_misc,
835 .pm_prepare = &evergreen_pm_prepare, 835 .pm_prepare = &evergreen_pm_prepare,
836 .pm_finish = &evergreen_pm_finish, 836 .pm_finish = &evergreen_pm_finish,
837 .pm_init_profile = &rs780_pm_init_profile, 837 .pm_init_profile = &sumo_pm_init_profile,
838 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 838 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
839 .pre_page_flip = &evergreen_pre_page_flip, 839 .pre_page_flip = &evergreen_pre_page_flip,
840 .page_flip = &evergreen_page_flip, 840 .page_flip = &evergreen_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 85f14f0337e4..59914842a729 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -413,6 +413,7 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
413extern void evergreen_pm_misc(struct radeon_device *rdev); 413extern void evergreen_pm_misc(struct radeon_device *rdev);
414extern void evergreen_pm_prepare(struct radeon_device *rdev); 414extern void evergreen_pm_prepare(struct radeon_device *rdev);
415extern void evergreen_pm_finish(struct radeon_device *rdev); 415extern void evergreen_pm_finish(struct radeon_device *rdev);
416extern void sumo_pm_init_profile(struct radeon_device *rdev);
416extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); 417extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
417extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 418extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
418extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); 419extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 08d0b94332e6..d2d179267af3 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1999,6 +1999,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
1999 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 1999 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2000 switch (frev) { 2000 switch (frev) {
2001 case 1: 2001 case 1:
2002 rdev->pm.power_state[state_index].clock_info =
2003 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2004 if (!rdev->pm.power_state[state_index].clock_info)
2005 return state_index;
2002 rdev->pm.power_state[state_index].num_clock_modes = 1; 2006 rdev->pm.power_state[state_index].num_clock_modes = 1;
2003 rdev->pm.power_state[state_index].clock_info[0].mclk = 2007 rdev->pm.power_state[state_index].clock_info[0].mclk =
2004 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); 2008 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
@@ -2035,6 +2039,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2035 state_index++; 2039 state_index++;
2036 break; 2040 break;
2037 case 2: 2041 case 2:
2042 rdev->pm.power_state[state_index].clock_info =
2043 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2044 if (!rdev->pm.power_state[state_index].clock_info)
2045 return state_index;
2038 rdev->pm.power_state[state_index].num_clock_modes = 1; 2046 rdev->pm.power_state[state_index].num_clock_modes = 1;
2039 rdev->pm.power_state[state_index].clock_info[0].mclk = 2047 rdev->pm.power_state[state_index].clock_info[0].mclk =
2040 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); 2048 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
@@ -2072,6 +2080,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2072 state_index++; 2080 state_index++;
2073 break; 2081 break;
2074 case 3: 2082 case 3:
2083 rdev->pm.power_state[state_index].clock_info =
2084 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2085 if (!rdev->pm.power_state[state_index].clock_info)
2086 return state_index;
2075 rdev->pm.power_state[state_index].num_clock_modes = 1; 2087 rdev->pm.power_state[state_index].num_clock_modes = 1;
2076 rdev->pm.power_state[state_index].clock_info[0].mclk = 2088 rdev->pm.power_state[state_index].clock_info[0].mclk =
2077 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); 2089 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
@@ -2257,7 +2269,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
2257 rdev->pm.default_power_state_index = state_index; 2269 rdev->pm.default_power_state_index = state_index;
2258 rdev->pm.power_state[state_index].default_clock_mode = 2270 rdev->pm.power_state[state_index].default_clock_mode =
2259 &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; 2271 &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
2260 if (ASIC_IS_DCE5(rdev)) { 2272 if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
2261 /* NI chips post without MC ucode, so default clocks are strobe mode only */ 2273 /* NI chips post without MC ucode, so default clocks are strobe mode only */
2262 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; 2274 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
2263 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; 2275 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
@@ -2377,17 +2389,31 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
2377 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + 2389 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
2378 (power_state->v1.ucNonClockStateIndex * 2390 (power_state->v1.ucNonClockStateIndex *
2379 power_info->pplib.ucNonClockSize)); 2391 power_info->pplib.ucNonClockSize));
2380 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { 2392 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
2381 clock_info = (union pplib_clock_info *) 2393 ((power_info->pplib.ucStateEntrySize - 1) ?
2382 (mode_info->atom_context->bios + data_offset + 2394 (power_info->pplib.ucStateEntrySize - 1) : 1),
2383 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + 2395 GFP_KERNEL);
2384 (power_state->v1.ucClockStateIndices[j] * 2396 if (!rdev->pm.power_state[i].clock_info)
2385 power_info->pplib.ucClockInfoSize)); 2397 return state_index;
2386 valid = radeon_atombios_parse_pplib_clock_info(rdev, 2398 if (power_info->pplib.ucStateEntrySize - 1) {
2387 state_index, mode_index, 2399 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
2388 clock_info); 2400 clock_info = (union pplib_clock_info *)
2389 if (valid) 2401 (mode_info->atom_context->bios + data_offset +
2390 mode_index++; 2402 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
2403 (power_state->v1.ucClockStateIndices[j] *
2404 power_info->pplib.ucClockInfoSize));
2405 valid = radeon_atombios_parse_pplib_clock_info(rdev,
2406 state_index, mode_index,
2407 clock_info);
2408 if (valid)
2409 mode_index++;
2410 }
2411 } else {
2412 rdev->pm.power_state[state_index].clock_info[0].mclk =
2413 rdev->clock.default_mclk;
2414 rdev->pm.power_state[state_index].clock_info[0].sclk =
2415 rdev->clock.default_sclk;
2416 mode_index++;
2391 } 2417 }
2392 rdev->pm.power_state[state_index].num_clock_modes = mode_index; 2418 rdev->pm.power_state[state_index].num_clock_modes = mode_index;
2393 if (mode_index) { 2419 if (mode_index) {
@@ -2456,18 +2482,32 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2456 non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ 2482 non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
2457 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2483 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2458 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2484 &non_clock_info_array->nonClockInfo[non_clock_array_index];
2459 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2485 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
2460 clock_array_index = power_state->v2.clockInfoIndex[j]; 2486 (power_state->v2.ucNumDPMLevels ?
2461 /* XXX this might be an inagua bug... */ 2487 power_state->v2.ucNumDPMLevels : 1),
2462 if (clock_array_index >= clock_info_array->ucNumEntries) 2488 GFP_KERNEL);
2463 continue; 2489 if (!rdev->pm.power_state[i].clock_info)
2464 clock_info = (union pplib_clock_info *) 2490 return state_index;
2465 &clock_info_array->clockInfo[clock_array_index]; 2491 if (power_state->v2.ucNumDPMLevels) {
2466 valid = radeon_atombios_parse_pplib_clock_info(rdev, 2492 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2467 state_index, mode_index, 2493 clock_array_index = power_state->v2.clockInfoIndex[j];
2468 clock_info); 2494 /* XXX this might be an inagua bug... */
2469 if (valid) 2495 if (clock_array_index >= clock_info_array->ucNumEntries)
2470 mode_index++; 2496 continue;
2497 clock_info = (union pplib_clock_info *)
2498 &clock_info_array->clockInfo[clock_array_index];
2499 valid = radeon_atombios_parse_pplib_clock_info(rdev,
2500 state_index, mode_index,
2501 clock_info);
2502 if (valid)
2503 mode_index++;
2504 }
2505 } else {
2506 rdev->pm.power_state[state_index].clock_info[0].mclk =
2507 rdev->clock.default_mclk;
2508 rdev->pm.power_state[state_index].clock_info[0].sclk =
2509 rdev->clock.default_sclk;
2510 mode_index++;
2471 } 2511 }
2472 rdev->pm.power_state[state_index].num_clock_modes = mode_index; 2512 rdev->pm.power_state[state_index].num_clock_modes = mode_index;
2473 if (mode_index) { 2513 if (mode_index) {
@@ -2524,19 +2564,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2524 } else { 2564 } else {
2525 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); 2565 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
2526 if (rdev->pm.power_state) { 2566 if (rdev->pm.power_state) {
2527 /* add the default mode */ 2567 rdev->pm.power_state[0].clock_info =
2528 rdev->pm.power_state[state_index].type = 2568 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2529 POWER_STATE_TYPE_DEFAULT; 2569 if (rdev->pm.power_state[0].clock_info) {
2530 rdev->pm.power_state[state_index].num_clock_modes = 1; 2570 /* add the default mode */
2531 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; 2571 rdev->pm.power_state[state_index].type =
2532 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; 2572 POWER_STATE_TYPE_DEFAULT;
2533 rdev->pm.power_state[state_index].default_clock_mode = 2573 rdev->pm.power_state[state_index].num_clock_modes = 1;
2534 &rdev->pm.power_state[state_index].clock_info[0]; 2574 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
2535 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 2575 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
2536 rdev->pm.power_state[state_index].pcie_lanes = 16; 2576 rdev->pm.power_state[state_index].default_clock_mode =
2537 rdev->pm.default_power_state_index = state_index; 2577 &rdev->pm.power_state[state_index].clock_info[0];
2538 rdev->pm.power_state[state_index].flags = 0; 2578 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2539 state_index++; 2579 rdev->pm.power_state[state_index].pcie_lanes = 16;
2580 rdev->pm.default_power_state_index = state_index;
2581 rdev->pm.power_state[state_index].flags = 0;
2582 state_index++;
2583 }
2540 } 2584 }
2541 } 2585 }
2542 2586
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 5cafc90de7f8..17e1a9b2d8fb 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -98,7 +98,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
98 struct radeon_bo *sobj = NULL; 98 struct radeon_bo *sobj = NULL;
99 uint64_t saddr, daddr; 99 uint64_t saddr, daddr;
100 int r, n; 100 int r, n;
101 unsigned int time; 101 int time;
102 102
103 n = RADEON_BENCHMARK_ITERATIONS; 103 n = RADEON_BENCHMARK_ITERATIONS;
104 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); 104 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 8bf83c4b4147..81fc100be7e1 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2563,14 +2563,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
2563 2563
2564 /* allocate 2 power states */ 2564 /* allocate 2 power states */
2565 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL); 2565 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
2566 if (!rdev->pm.power_state) { 2566 if (rdev->pm.power_state) {
2567 rdev->pm.default_power_state_index = state_index; 2567 /* allocate 1 clock mode per state */
2568 rdev->pm.num_power_states = 0; 2568 rdev->pm.power_state[0].clock_info =
2569 2569 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2570 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; 2570 rdev->pm.power_state[1].clock_info =
2571 rdev->pm.current_clock_mode_index = 0; 2571 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2572 return; 2572 if (!rdev->pm.power_state[0].clock_info ||
2573 } 2573 !rdev->pm.power_state[1].clock_info)
2574 goto pm_failed;
2575 } else
2576 goto pm_failed;
2574 2577
2575 /* check for a thermal chip */ 2578 /* check for a thermal chip */
2576 offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); 2579 offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
@@ -2735,6 +2738,14 @@ default_mode:
2735 2738
2736 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; 2739 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
2737 rdev->pm.current_clock_mode_index = 0; 2740 rdev->pm.current_clock_mode_index = 0;
2741 return;
2742
2743pm_failed:
2744 rdev->pm.default_power_state_index = state_index;
2745 rdev->pm.num_power_states = 0;
2746
2747 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
2748 rdev->pm.current_clock_mode_index = 0;
2738} 2749}
2739 2750
2740void radeon_external_tmds_setup(struct drm_encoder *encoder) 2751void radeon_external_tmds_setup(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index dec6cbe6a0a6..e7cb3ab09243 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -44,8 +44,6 @@ extern void
44radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, 44radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
45 struct drm_connector *drm_connector); 45 struct drm_connector *drm_connector);
46 46
47bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
48
49void radeon_connector_hotplug(struct drm_connector *connector) 47void radeon_connector_hotplug(struct drm_connector *connector)
50{ 48{
51 struct drm_device *dev = connector->dev; 49 struct drm_device *dev = connector->dev;
@@ -432,55 +430,6 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
432 return 0; 430 return 0;
433} 431}
434 432
435/*
436 * Some integrated ATI Radeon chipset implementations (e. g.
437 * Asus M2A-VM HDMI) may indicate the availability of a DDC,
438 * even when there's no monitor connected. For these connectors
439 * following DDC probe extension will be applied: check also for the
440 * availability of EDID with at least a correct EDID header. Only then,
441 * DDC is assumed to be available. This prevents drm_get_edid() and
442 * drm_edid_block_valid() from periodically dumping data and kernel
443 * errors into the logs and onto the terminal.
444 */
445static bool radeon_connector_needs_extended_probe(struct radeon_device *dev,
446 uint32_t supported_device,
447 int connector_type)
448{
449 /* Asus M2A-VM HDMI board sends data to i2c bus even,
450 * if HDMI add-on card is not plugged in or HDMI is disabled in
451 * BIOS. Valid DDC can only be assumed, if also a valid EDID header
452 * can be retrieved via i2c bus during DDC probe */
453 if ((dev->pdev->device == 0x791e) &&
454 (dev->pdev->subsystem_vendor == 0x1043) &&
455 (dev->pdev->subsystem_device == 0x826d)) {
456 if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
457 (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
458 return true;
459 }
460 /* ECS A740GM-M with ATI RADEON 2100 sends data to i2c bus
461 * for a DVI connector that is not implemented */
462 if ((dev->pdev->device == 0x796e) &&
463 (dev->pdev->subsystem_vendor == 0x1019) &&
464 (dev->pdev->subsystem_device == 0x2615)) {
465 if ((connector_type == DRM_MODE_CONNECTOR_DVID) &&
466 (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
467 return true;
468 }
469 /* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100
470 * (RS690M) sends data to i2c bus for a HDMI connector that
471 * is not implemented */
472 if ((dev->pdev->device == 0x791f) &&
473 (dev->pdev->subsystem_vendor == 0x1179) &&
474 (dev->pdev->subsystem_device == 0xff68)) {
475 if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
476 (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
477 return true;
478 }
479
480 /* Default: no EDID header probe required for DDC probing */
481 return false;
482}
483
484static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, 433static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
485 struct drm_connector *connector) 434 struct drm_connector *connector)
486{ 435{
@@ -721,8 +670,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
721 ret = connector_status_disconnected; 670 ret = connector_status_disconnected;
722 671
723 if (radeon_connector->ddc_bus) 672 if (radeon_connector->ddc_bus)
724 dret = radeon_ddc_probe(radeon_connector, 673 dret = radeon_ddc_probe(radeon_connector);
725 radeon_connector->requires_extended_probe);
726 if (dret) { 674 if (dret) {
727 radeon_connector->detected_by_load = false; 675 radeon_connector->detected_by_load = false;
728 if (radeon_connector->edid) { 676 if (radeon_connector->edid) {
@@ -764,7 +712,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
764 if (radeon_connector->dac_load_detect && encoder) { 712 if (radeon_connector->dac_load_detect && encoder) {
765 encoder_funcs = encoder->helper_private; 713 encoder_funcs = encoder->helper_private;
766 ret = encoder_funcs->detect(encoder, connector); 714 ret = encoder_funcs->detect(encoder, connector);
767 if (ret == connector_status_connected) 715 if (ret != connector_status_disconnected)
768 radeon_connector->detected_by_load = true; 716 radeon_connector->detected_by_load = true;
769 } 717 }
770 } 718 }
@@ -904,8 +852,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
904 bool dret = false; 852 bool dret = false;
905 853
906 if (radeon_connector->ddc_bus) 854 if (radeon_connector->ddc_bus)
907 dret = radeon_ddc_probe(radeon_connector, 855 dret = radeon_ddc_probe(radeon_connector);
908 radeon_connector->requires_extended_probe);
909 if (dret) { 856 if (dret) {
910 radeon_connector->detected_by_load = false; 857 radeon_connector->detected_by_load = false;
911 if (radeon_connector->edid) { 858 if (radeon_connector->edid) {
@@ -1005,8 +952,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
1005 ret = encoder_funcs->detect(encoder, connector); 952 ret = encoder_funcs->detect(encoder, connector);
1006 if (ret == connector_status_connected) { 953 if (ret == connector_status_connected) {
1007 radeon_connector->use_digital = false; 954 radeon_connector->use_digital = false;
1008 radeon_connector->detected_by_load = true;
1009 } 955 }
956 if (ret != connector_status_disconnected)
957 radeon_connector->detected_by_load = true;
1010 } 958 }
1011 break; 959 break;
1012 } 960 }
@@ -1203,7 +1151,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
1203 } 1151 }
1204 } else { 1152 } else {
1205 /* need to setup ddc on the bridge */ 1153 /* need to setup ddc on the bridge */
1206 if (radeon_connector_encoder_is_dp_bridge(connector)) { 1154 if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
1155 ENCODER_OBJECT_ID_NONE) {
1207 if (encoder) 1156 if (encoder)
1208 radeon_atom_ext_encoder_setup_ddc(encoder); 1157 radeon_atom_ext_encoder_setup_ddc(encoder);
1209 } 1158 }
@@ -1213,13 +1162,12 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
1213 return ret; 1162 return ret;
1214} 1163}
1215 1164
1216bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector) 1165u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
1217{ 1166{
1218 struct drm_mode_object *obj; 1167 struct drm_mode_object *obj;
1219 struct drm_encoder *encoder; 1168 struct drm_encoder *encoder;
1220 struct radeon_encoder *radeon_encoder; 1169 struct radeon_encoder *radeon_encoder;
1221 int i; 1170 int i;
1222 bool found = false;
1223 1171
1224 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 1172 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
1225 if (connector->encoder_ids[i] == 0) 1173 if (connector->encoder_ids[i] == 0)
@@ -1235,14 +1183,13 @@ bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector)
1235 switch (radeon_encoder->encoder_id) { 1183 switch (radeon_encoder->encoder_id) {
1236 case ENCODER_OBJECT_ID_TRAVIS: 1184 case ENCODER_OBJECT_ID_TRAVIS:
1237 case ENCODER_OBJECT_ID_NUTMEG: 1185 case ENCODER_OBJECT_ID_NUTMEG:
1238 found = true; 1186 return radeon_encoder->encoder_id;
1239 break;
1240 default: 1187 default:
1241 break; 1188 break;
1242 } 1189 }
1243 } 1190 }
1244 1191
1245 return found; 1192 return ENCODER_OBJECT_ID_NONE;
1246} 1193}
1247 1194
1248bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) 1195bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
@@ -1319,7 +1266,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1319 if (!radeon_dig_connector->edp_on) 1266 if (!radeon_dig_connector->edp_on)
1320 atombios_set_edp_panel_power(connector, 1267 atombios_set_edp_panel_power(connector,
1321 ATOM_TRANSMITTER_ACTION_POWER_OFF); 1268 ATOM_TRANSMITTER_ACTION_POWER_OFF);
1322 } else if (radeon_connector_encoder_is_dp_bridge(connector)) { 1269 } else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
1270 ENCODER_OBJECT_ID_NONE) {
1323 /* DP bridges are always DP */ 1271 /* DP bridges are always DP */
1324 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; 1272 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
1325 /* get the DPCD from the bridge */ 1273 /* get the DPCD from the bridge */
@@ -1328,8 +1276,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1328 if (encoder) { 1276 if (encoder) {
1329 /* setup ddc on the bridge */ 1277 /* setup ddc on the bridge */
1330 radeon_atom_ext_encoder_setup_ddc(encoder); 1278 radeon_atom_ext_encoder_setup_ddc(encoder);
1331 if (radeon_ddc_probe(radeon_connector, 1279 if (radeon_ddc_probe(radeon_connector)) /* try DDC */
1332 radeon_connector->requires_extended_probe)) /* try DDC */
1333 ret = connector_status_connected; 1280 ret = connector_status_connected;
1334 else if (radeon_connector->dac_load_detect) { /* try load detection */ 1281 else if (radeon_connector->dac_load_detect) { /* try load detection */
1335 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 1282 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -1347,8 +1294,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1347 if (radeon_dp_getdpcd(radeon_connector)) 1294 if (radeon_dp_getdpcd(radeon_connector))
1348 ret = connector_status_connected; 1295 ret = connector_status_connected;
1349 } else { 1296 } else {
1350 if (radeon_ddc_probe(radeon_connector, 1297 if (radeon_ddc_probe(radeon_connector))
1351 radeon_connector->requires_extended_probe))
1352 ret = connector_status_connected; 1298 ret = connector_status_connected;
1353 } 1299 }
1354 } 1300 }
@@ -1493,9 +1439,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1493 radeon_connector->shared_ddc = shared_ddc; 1439 radeon_connector->shared_ddc = shared_ddc;
1494 radeon_connector->connector_object_id = connector_object_id; 1440 radeon_connector->connector_object_id = connector_object_id;
1495 radeon_connector->hpd = *hpd; 1441 radeon_connector->hpd = *hpd;
1496 radeon_connector->requires_extended_probe = 1442
1497 radeon_connector_needs_extended_probe(rdev, supported_device,
1498 connector_type);
1499 radeon_connector->router = *router; 1443 radeon_connector->router = *router;
1500 if (router->ddc_valid || router->cd_valid) { 1444 if (router->ddc_valid || router->cd_valid) {
1501 radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); 1445 radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
@@ -1842,9 +1786,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1842 radeon_connector->devices = supported_device; 1786 radeon_connector->devices = supported_device;
1843 radeon_connector->connector_object_id = connector_object_id; 1787 radeon_connector->connector_object_id = connector_object_id;
1844 radeon_connector->hpd = *hpd; 1788 radeon_connector->hpd = *hpd;
1845 radeon_connector->requires_extended_probe = 1789
1846 radeon_connector_needs_extended_probe(rdev, supported_device,
1847 connector_type);
1848 switch (connector_type) { 1790 switch (connector_type) {
1849 case DRM_MODE_CONNECTOR_VGA: 1791 case DRM_MODE_CONNECTOR_VGA:
1850 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1792 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index fae00c0d75aa..ccaa243c1442 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -222,7 +222,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
222 struct radeon_cs_chunk *ib_chunk; 222 struct radeon_cs_chunk *ib_chunk;
223 int r; 223 int r;
224 224
225 mutex_lock(&rdev->cs_mutex); 225 radeon_mutex_lock(&rdev->cs_mutex);
226 /* initialize parser */ 226 /* initialize parser */
227 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 227 memset(&parser, 0, sizeof(struct radeon_cs_parser));
228 parser.filp = filp; 228 parser.filp = filp;
@@ -233,14 +233,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
233 if (r) { 233 if (r) {
234 DRM_ERROR("Failed to initialize parser !\n"); 234 DRM_ERROR("Failed to initialize parser !\n");
235 radeon_cs_parser_fini(&parser, r); 235 radeon_cs_parser_fini(&parser, r);
236 mutex_unlock(&rdev->cs_mutex); 236 radeon_mutex_unlock(&rdev->cs_mutex);
237 return r; 237 return r;
238 } 238 }
239 r = radeon_ib_get(rdev, &parser.ib); 239 r = radeon_ib_get(rdev, &parser.ib);
240 if (r) { 240 if (r) {
241 DRM_ERROR("Failed to get ib !\n"); 241 DRM_ERROR("Failed to get ib !\n");
242 radeon_cs_parser_fini(&parser, r); 242 radeon_cs_parser_fini(&parser, r);
243 mutex_unlock(&rdev->cs_mutex); 243 radeon_mutex_unlock(&rdev->cs_mutex);
244 return r; 244 return r;
245 } 245 }
246 r = radeon_cs_parser_relocs(&parser); 246 r = radeon_cs_parser_relocs(&parser);
@@ -248,7 +248,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
248 if (r != -ERESTARTSYS) 248 if (r != -ERESTARTSYS)
249 DRM_ERROR("Failed to parse relocation %d!\n", r); 249 DRM_ERROR("Failed to parse relocation %d!\n", r);
250 radeon_cs_parser_fini(&parser, r); 250 radeon_cs_parser_fini(&parser, r);
251 mutex_unlock(&rdev->cs_mutex); 251 radeon_mutex_unlock(&rdev->cs_mutex);
252 return r; 252 return r;
253 } 253 }
254 /* Copy the packet into the IB, the parser will read from the 254 /* Copy the packet into the IB, the parser will read from the
@@ -260,14 +260,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
260 if (r || parser.parser_error) { 260 if (r || parser.parser_error) {
261 DRM_ERROR("Invalid command stream !\n"); 261 DRM_ERROR("Invalid command stream !\n");
262 radeon_cs_parser_fini(&parser, r); 262 radeon_cs_parser_fini(&parser, r);
263 mutex_unlock(&rdev->cs_mutex); 263 radeon_mutex_unlock(&rdev->cs_mutex);
264 return r; 264 return r;
265 } 265 }
266 r = radeon_cs_finish_pages(&parser); 266 r = radeon_cs_finish_pages(&parser);
267 if (r) { 267 if (r) {
268 DRM_ERROR("Invalid command stream !\n"); 268 DRM_ERROR("Invalid command stream !\n");
269 radeon_cs_parser_fini(&parser, r); 269 radeon_cs_parser_fini(&parser, r);
270 mutex_unlock(&rdev->cs_mutex); 270 radeon_mutex_unlock(&rdev->cs_mutex);
271 return r; 271 return r;
272 } 272 }
273 r = radeon_ib_schedule(rdev, parser.ib); 273 r = radeon_ib_schedule(rdev, parser.ib);
@@ -275,7 +275,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
275 DRM_ERROR("Failed to schedule IB !\n"); 275 DRM_ERROR("Failed to schedule IB !\n");
276 } 276 }
277 radeon_cs_parser_fini(&parser, r); 277 radeon_cs_parser_fini(&parser, r);
278 mutex_unlock(&rdev->cs_mutex); 278 radeon_mutex_unlock(&rdev->cs_mutex);
279 return r; 279 return r;
280} 280}
281 281
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c33bc914d93d..c4d00a171411 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -716,7 +716,7 @@ int radeon_device_init(struct radeon_device *rdev,
716 716
717 /* mutex initialization are all done here so we 717 /* mutex initialization are all done here so we
718 * can recall function without having locking issues */ 718 * can recall function without having locking issues */
719 mutex_init(&rdev->cs_mutex); 719 radeon_mutex_init(&rdev->cs_mutex);
720 mutex_init(&rdev->ib_pool.mutex); 720 mutex_init(&rdev->ib_pool.mutex);
721 mutex_init(&rdev->cp.mutex); 721 mutex_init(&rdev->cp.mutex);
722 mutex_init(&rdev->dc_hw_i2c_mutex); 722 mutex_init(&rdev->dc_hw_i2c_mutex);
@@ -955,6 +955,9 @@ int radeon_gpu_reset(struct radeon_device *rdev)
955 int r; 955 int r;
956 int resched; 956 int resched;
957 957
958 /* Prevent CS ioctl from interfering */
959 radeon_mutex_lock(&rdev->cs_mutex);
960
958 radeon_save_bios_scratch_regs(rdev); 961 radeon_save_bios_scratch_regs(rdev);
959 /* block TTM */ 962 /* block TTM */
960 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 963 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -967,10 +970,15 @@ int radeon_gpu_reset(struct radeon_device *rdev)
967 radeon_restore_bios_scratch_regs(rdev); 970 radeon_restore_bios_scratch_regs(rdev);
968 drm_helper_resume_force_mode(rdev->ddev); 971 drm_helper_resume_force_mode(rdev->ddev);
969 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 972 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
970 return 0;
971 } 973 }
972 /* bad news, how to tell it to userspace ? */ 974
973 dev_info(rdev->dev, "GPU reset failed\n"); 975 radeon_mutex_unlock(&rdev->cs_mutex);
976
977 if (r) {
978 /* bad news, how to tell it to userspace ? */
979 dev_info(rdev->dev, "GPU reset failed\n");
980 }
981
974 return r; 982 return r;
975} 983}
976 984
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6adb3e58affd..a22d6e6a49a2 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -33,8 +33,6 @@
33#include "drm_crtc_helper.h" 33#include "drm_crtc_helper.h"
34#include "drm_edid.h" 34#include "drm_edid.h"
35 35
36static int radeon_ddc_dump(struct drm_connector *connector);
37
38static void avivo_crtc_load_lut(struct drm_crtc *crtc) 36static void avivo_crtc_load_lut(struct drm_crtc *crtc)
39{ 37{
40 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 38 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -669,7 +667,6 @@ static void radeon_print_display_setup(struct drm_device *dev)
669static bool radeon_setup_enc_conn(struct drm_device *dev) 667static bool radeon_setup_enc_conn(struct drm_device *dev)
670{ 668{
671 struct radeon_device *rdev = dev->dev_private; 669 struct radeon_device *rdev = dev->dev_private;
672 struct drm_connector *drm_connector;
673 bool ret = false; 670 bool ret = false;
674 671
675 if (rdev->bios) { 672 if (rdev->bios) {
@@ -689,8 +686,6 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
689 if (ret) { 686 if (ret) {
690 radeon_setup_encoder_clones(dev); 687 radeon_setup_encoder_clones(dev);
691 radeon_print_display_setup(dev); 688 radeon_print_display_setup(dev);
692 list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
693 radeon_ddc_dump(drm_connector);
694 } 689 }
695 690
696 return ret; 691 return ret;
@@ -708,7 +703,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
708 703
709 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 704 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
710 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || 705 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
711 radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) { 706 (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
707 ENCODER_OBJECT_ID_NONE)) {
712 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 708 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
713 709
714 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || 710 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
@@ -743,34 +739,6 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
743 return 0; 739 return 0;
744} 740}
745 741
746static int radeon_ddc_dump(struct drm_connector *connector)
747{
748 struct edid *edid;
749 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
750 int ret = 0;
751
752 /* on hw with routers, select right port */
753 if (radeon_connector->router.ddc_valid)
754 radeon_router_select_ddc_port(radeon_connector);
755
756 if (!radeon_connector->ddc_bus)
757 return -1;
758 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
759 /* Log EDID retrieval status here. In particular with regard to
760 * connectors with requires_extended_probe flag set, that will prevent
761 * function radeon_dvi_detect() to fetch EDID on this connector,
762 * as long as there is no valid EDID header found */
763 if (edid) {
764 DRM_INFO("Radeon display connector %s: Found valid EDID",
765 drm_get_connector_name(connector));
766 kfree(edid);
767 } else {
768 DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID",
769 drm_get_connector_name(connector));
770 }
771 return ret;
772}
773
774/* avivo */ 742/* avivo */
775static void avivo_get_fb_div(struct radeon_pll *pll, 743static void avivo_get_fb_div(struct radeon_pll *pll,
776 u32 target_clock, 744 u32 target_clock,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 969933833ccb..a0b35e909489 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -119,6 +119,7 @@ int radeon_audio = 0;
119int radeon_disp_priority = 0; 119int radeon_disp_priority = 0;
120int radeon_hw_i2c = 0; 120int radeon_hw_i2c = 0;
121int radeon_pcie_gen2 = 0; 121int radeon_pcie_gen2 = 0;
122int radeon_msi = -1;
122 123
123MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 124MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
124module_param_named(no_wb, radeon_no_wb, int, 0444); 125module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -165,6 +166,9 @@ module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
165MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)"); 166MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
166module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444); 167module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
167 168
169MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
170module_param_named(msi, radeon_msi, int, 0444);
171
168static int radeon_suspend(struct drm_device *dev, pm_message_t state) 172static int radeon_suspend(struct drm_device *dev, pm_message_t state)
169{ 173{
170 drm_radeon_private_t *dev_priv = dev->dev_private; 174 drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index eb3f6dc6df83..06e413e6a920 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -29,12 +29,6 @@
29#include "radeon.h" 29#include "radeon.h"
30#include "atom.h" 30#include "atom.h"
31 31
32extern int atom_debug;
33
34/* evil but including atombios.h is much worse */
35bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
36 struct drm_display_mode *mode);
37
38static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) 32static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
39{ 33{
40 struct drm_device *dev = encoder->dev; 34 struct drm_device *dev = encoder->dev;
@@ -156,27 +150,6 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
156 return ret; 150 return ret;
157} 151}
158 152
159static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
160{
161 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
162 switch (radeon_encoder->encoder_id) {
163 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
164 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
165 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
166 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
167 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
168 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
169 case ENCODER_OBJECT_ID_INTERNAL_DDI:
170 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
171 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
172 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
173 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
174 return true;
175 default:
176 return false;
177 }
178}
179
180void 153void
181radeon_link_encoder_connector(struct drm_device *dev) 154radeon_link_encoder_connector(struct drm_device *dev)
182{ 155{
@@ -229,23 +202,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
229 return NULL; 202 return NULL;
230} 203}
231 204
232static struct drm_connector * 205struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
233radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
234{
235 struct drm_device *dev = encoder->dev;
236 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
237 struct drm_connector *connector;
238 struct radeon_connector *radeon_connector;
239
240 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
241 radeon_connector = to_radeon_connector(connector);
242 if (radeon_encoder->devices & radeon_connector->devices)
243 return connector;
244 }
245 return NULL;
246}
247
248struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder)
249{ 206{
250 struct drm_device *dev = encoder->dev; 207 struct drm_device *dev = encoder->dev;
251 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 208 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -266,9 +223,9 @@ struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder
266 return NULL; 223 return NULL;
267} 224}
268 225
269bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder) 226u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
270{ 227{
271 struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder); 228 struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder);
272 229
273 if (other_encoder) { 230 if (other_encoder) {
274 struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder); 231 struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);
@@ -332,2105 +289,3 @@ void radeon_panel_mode_fixup(struct drm_encoder *encoder,
332 289
333} 290}
334 291
335static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
336 struct drm_display_mode *mode,
337 struct drm_display_mode *adjusted_mode)
338{
339 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
340 struct drm_device *dev = encoder->dev;
341 struct radeon_device *rdev = dev->dev_private;
342
343 /* set the active encoder to connector routing */
344 radeon_encoder_set_active_device(encoder);
345 drm_mode_set_crtcinfo(adjusted_mode, 0);
346
347 /* hw bug */
348 if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
349 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
350 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
351
352 /* get the native mode for LVDS */
353 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
354 radeon_panel_mode_fixup(encoder, adjusted_mode);
355
356 /* get the native mode for TV */
357 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
358 struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
359 if (tv_dac) {
360 if (tv_dac->tv_std == TV_STD_NTSC ||
361 tv_dac->tv_std == TV_STD_NTSC_J ||
362 tv_dac->tv_std == TV_STD_PAL_M)
363 radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
364 else
365 radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
366 }
367 }
368
369 if (ASIC_IS_DCE3(rdev) &&
370 ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
371 radeon_encoder_is_dp_bridge(encoder))) {
372 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
373 radeon_dp_set_link_config(connector, mode);
374 }
375
376 return true;
377}
378
379static void
380atombios_dac_setup(struct drm_encoder *encoder, int action)
381{
382 struct drm_device *dev = encoder->dev;
383 struct radeon_device *rdev = dev->dev_private;
384 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
385 DAC_ENCODER_CONTROL_PS_ALLOCATION args;
386 int index = 0;
387 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
388
389 memset(&args, 0, sizeof(args));
390
391 switch (radeon_encoder->encoder_id) {
392 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
393 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
394 index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
395 break;
396 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
397 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
398 index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
399 break;
400 }
401
402 args.ucAction = action;
403
404 if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
405 args.ucDacStandard = ATOM_DAC1_PS2;
406 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
407 args.ucDacStandard = ATOM_DAC1_CV;
408 else {
409 switch (dac_info->tv_std) {
410 case TV_STD_PAL:
411 case TV_STD_PAL_M:
412 case TV_STD_SCART_PAL:
413 case TV_STD_SECAM:
414 case TV_STD_PAL_CN:
415 args.ucDacStandard = ATOM_DAC1_PAL;
416 break;
417 case TV_STD_NTSC:
418 case TV_STD_NTSC_J:
419 case TV_STD_PAL_60:
420 default:
421 args.ucDacStandard = ATOM_DAC1_NTSC;
422 break;
423 }
424 }
425 args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
426
427 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
428
429}
430
431static void
432atombios_tv_setup(struct drm_encoder *encoder, int action)
433{
434 struct drm_device *dev = encoder->dev;
435 struct radeon_device *rdev = dev->dev_private;
436 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
437 TV_ENCODER_CONTROL_PS_ALLOCATION args;
438 int index = 0;
439 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
440
441 memset(&args, 0, sizeof(args));
442
443 index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
444
445 args.sTVEncoder.ucAction = action;
446
447 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
448 args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
449 else {
450 switch (dac_info->tv_std) {
451 case TV_STD_NTSC:
452 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
453 break;
454 case TV_STD_PAL:
455 args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
456 break;
457 case TV_STD_PAL_M:
458 args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
459 break;
460 case TV_STD_PAL_60:
461 args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
462 break;
463 case TV_STD_NTSC_J:
464 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
465 break;
466 case TV_STD_SCART_PAL:
467 args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
468 break;
469 case TV_STD_SECAM:
470 args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
471 break;
472 case TV_STD_PAL_CN:
473 args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
474 break;
475 default:
476 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
477 break;
478 }
479 }
480
481 args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
482
483 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
484
485}
486
487union dvo_encoder_control {
488 ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
489 DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
490 DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
491};
492
493void
494atombios_dvo_setup(struct drm_encoder *encoder, int action)
495{
496 struct drm_device *dev = encoder->dev;
497 struct radeon_device *rdev = dev->dev_private;
498 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
499 union dvo_encoder_control args;
500 int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
501
502 memset(&args, 0, sizeof(args));
503
504 if (ASIC_IS_DCE3(rdev)) {
505 /* DCE3+ */
506 args.dvo_v3.ucAction = action;
507 args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
508 args.dvo_v3.ucDVOConfig = 0; /* XXX */
509 } else if (ASIC_IS_DCE2(rdev)) {
510 /* DCE2 (pre-DCE3 R6xx, RS600/690/740 */
511 args.dvo.sDVOEncoder.ucAction = action;
512 args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
513 /* DFP1, CRT1, TV1 depending on the type of port */
514 args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
515
516 if (radeon_encoder->pixel_clock > 165000)
517 args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
518 } else {
519 /* R4xx, R5xx */
520 args.ext_tmds.sXTmdsEncoder.ucEnable = action;
521
522 if (radeon_encoder->pixel_clock > 165000)
523 args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
524
525 /*if (pScrn->rgbBits == 8)*/
526 args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
527 }
528
529 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
530}
531
532union lvds_encoder_control {
533 LVDS_ENCODER_CONTROL_PS_ALLOCATION v1;
534 LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
535};
536
537void
538atombios_digital_setup(struct drm_encoder *encoder, int action)
539{
540 struct drm_device *dev = encoder->dev;
541 struct radeon_device *rdev = dev->dev_private;
542 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
543 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
544 union lvds_encoder_control args;
545 int index = 0;
546 int hdmi_detected = 0;
547 uint8_t frev, crev;
548
549 if (!dig)
550 return;
551
552 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
553 hdmi_detected = 1;
554
555 memset(&args, 0, sizeof(args));
556
557 switch (radeon_encoder->encoder_id) {
558 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
559 index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
560 break;
561 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
562 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
563 index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
564 break;
565 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
566 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
567 index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
568 else
569 index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
570 break;
571 }
572
573 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
574 return;
575
576 switch (frev) {
577 case 1:
578 case 2:
579 switch (crev) {
580 case 1:
581 args.v1.ucMisc = 0;
582 args.v1.ucAction = action;
583 if (hdmi_detected)
584 args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
585 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
586 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
587 if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
588 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
589 if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
590 args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
591 } else {
592 if (dig->linkb)
593 args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
594 if (radeon_encoder->pixel_clock > 165000)
595 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
596 /*if (pScrn->rgbBits == 8) */
597 args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
598 }
599 break;
600 case 2:
601 case 3:
602 args.v2.ucMisc = 0;
603 args.v2.ucAction = action;
604 if (crev == 3) {
605 if (dig->coherent_mode)
606 args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
607 }
608 if (hdmi_detected)
609 args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
610 args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
611 args.v2.ucTruncate = 0;
612 args.v2.ucSpatial = 0;
613 args.v2.ucTemporal = 0;
614 args.v2.ucFRC = 0;
615 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
616 if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
617 args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
618 if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
619 args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
620 if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
621 args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
622 }
623 if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
624 args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
625 if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
626 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
627 if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
628 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
629 }
630 } else {
631 if (dig->linkb)
632 args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
633 if (radeon_encoder->pixel_clock > 165000)
634 args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
635 }
636 break;
637 default:
638 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
639 break;
640 }
641 break;
642 default:
643 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
644 break;
645 }
646
647 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
648}
649
650int
651atombios_get_encoder_mode(struct drm_encoder *encoder)
652{
653 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
654 struct drm_device *dev = encoder->dev;
655 struct radeon_device *rdev = dev->dev_private;
656 struct drm_connector *connector;
657 struct radeon_connector *radeon_connector;
658 struct radeon_connector_atom_dig *dig_connector;
659
660 /* dp bridges are always DP */
661 if (radeon_encoder_is_dp_bridge(encoder))
662 return ATOM_ENCODER_MODE_DP;
663
664 /* DVO is always DVO */
665 if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
666 return ATOM_ENCODER_MODE_DVO;
667
668 connector = radeon_get_connector_for_encoder(encoder);
669 /* if we don't have an active device yet, just use one of
670 * the connectors tied to the encoder.
671 */
672 if (!connector)
673 connector = radeon_get_connector_for_encoder_init(encoder);
674 radeon_connector = to_radeon_connector(connector);
675
676 switch (connector->connector_type) {
677 case DRM_MODE_CONNECTOR_DVII:
678 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
679 if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
680 /* fix me */
681 if (ASIC_IS_DCE4(rdev))
682 return ATOM_ENCODER_MODE_DVI;
683 else
684 return ATOM_ENCODER_MODE_HDMI;
685 } else if (radeon_connector->use_digital)
686 return ATOM_ENCODER_MODE_DVI;
687 else
688 return ATOM_ENCODER_MODE_CRT;
689 break;
690 case DRM_MODE_CONNECTOR_DVID:
691 case DRM_MODE_CONNECTOR_HDMIA:
692 default:
693 if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
694 /* fix me */
695 if (ASIC_IS_DCE4(rdev))
696 return ATOM_ENCODER_MODE_DVI;
697 else
698 return ATOM_ENCODER_MODE_HDMI;
699 } else
700 return ATOM_ENCODER_MODE_DVI;
701 break;
702 case DRM_MODE_CONNECTOR_LVDS:
703 return ATOM_ENCODER_MODE_LVDS;
704 break;
705 case DRM_MODE_CONNECTOR_DisplayPort:
706 dig_connector = radeon_connector->con_priv;
707 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
708 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
709 return ATOM_ENCODER_MODE_DP;
710 else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
711 /* fix me */
712 if (ASIC_IS_DCE4(rdev))
713 return ATOM_ENCODER_MODE_DVI;
714 else
715 return ATOM_ENCODER_MODE_HDMI;
716 } else
717 return ATOM_ENCODER_MODE_DVI;
718 break;
719 case DRM_MODE_CONNECTOR_eDP:
720 return ATOM_ENCODER_MODE_DP;
721 case DRM_MODE_CONNECTOR_DVIA:
722 case DRM_MODE_CONNECTOR_VGA:
723 return ATOM_ENCODER_MODE_CRT;
724 break;
725 case DRM_MODE_CONNECTOR_Composite:
726 case DRM_MODE_CONNECTOR_SVIDEO:
727 case DRM_MODE_CONNECTOR_9PinDIN:
728 /* fix me */
729 return ATOM_ENCODER_MODE_TV;
730 /*return ATOM_ENCODER_MODE_CV;*/
731 break;
732 }
733}
734
735/*
736 * DIG Encoder/Transmitter Setup
737 *
738 * DCE 3.0/3.1
739 * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
740 * Supports up to 3 digital outputs
741 * - 2 DIG encoder blocks.
742 * DIG1 can drive UNIPHY link A or link B
743 * DIG2 can drive UNIPHY link B or LVTMA
744 *
745 * DCE 3.2
746 * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
747 * Supports up to 5 digital outputs
748 * - 2 DIG encoder blocks.
749 * DIG1/2 can drive UNIPHY0/1/2 link A or link B
750 *
751 * DCE 4.0/5.0
752 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
753 * Supports up to 6 digital outputs
754 * - 6 DIG encoder blocks.
755 * - DIG to PHY mapping is hardcoded
756 * DIG1 drives UNIPHY0 link A, A+B
757 * DIG2 drives UNIPHY0 link B
758 * DIG3 drives UNIPHY1 link A, A+B
759 * DIG4 drives UNIPHY1 link B
760 * DIG5 drives UNIPHY2 link A, A+B
761 * DIG6 drives UNIPHY2 link B
762 *
763 * DCE 4.1
764 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
765 * Supports up to 6 digital outputs
766 * - 2 DIG encoder blocks.
767 * DIG1/2 can drive UNIPHY0/1/2 link A or link B
768 *
769 * Routing
770 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
771 * Examples:
772 * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
773 * crtc1 -> dig1 -> UNIPHY0 link B -> DP
774 * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
775 * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
776 */
777
/* Parameter-block overlay for the DIGxEncoderControl ATOM command table.
 * The member that is valid depends on the frev/crev the table reports
 * via atom_parse_cmd_header().
 */
union dig_encoder_control {
	DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
	DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
	DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
	DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
};
784
785void
786atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
787{
788 struct drm_device *dev = encoder->dev;
789 struct radeon_device *rdev = dev->dev_private;
790 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
791 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
792 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
793 union dig_encoder_control args;
794 int index = 0;
795 uint8_t frev, crev;
796 int dp_clock = 0;
797 int dp_lane_count = 0;
798 int hpd_id = RADEON_HPD_NONE;
799 int bpc = 8;
800
801 if (connector) {
802 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
803 struct radeon_connector_atom_dig *dig_connector =
804 radeon_connector->con_priv;
805
806 dp_clock = dig_connector->dp_clock;
807 dp_lane_count = dig_connector->dp_lane_count;
808 hpd_id = radeon_connector->hpd.hpd;
809 bpc = connector->display_info.bpc;
810 }
811
812 /* no dig encoder assigned */
813 if (dig->dig_encoder == -1)
814 return;
815
816 memset(&args, 0, sizeof(args));
817
818 if (ASIC_IS_DCE4(rdev))
819 index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
820 else {
821 if (dig->dig_encoder)
822 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
823 else
824 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
825 }
826
827 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
828 return;
829
830 args.v1.ucAction = action;
831 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
832 if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
833 args.v3.ucPanelMode = panel_mode;
834 else
835 args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
836
837 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
838 (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
839 args.v1.ucLaneNum = dp_lane_count;
840 else if (radeon_encoder->pixel_clock > 165000)
841 args.v1.ucLaneNum = 8;
842 else
843 args.v1.ucLaneNum = 4;
844
845 if (ASIC_IS_DCE5(rdev)) {
846 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
847 (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) {
848 if (dp_clock == 270000)
849 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
850 else if (dp_clock == 540000)
851 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
852 }
853 args.v4.acConfig.ucDigSel = dig->dig_encoder;
854 switch (bpc) {
855 case 0:
856 args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
857 break;
858 case 6:
859 args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
860 break;
861 case 8:
862 default:
863 args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
864 break;
865 case 10:
866 args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
867 break;
868 case 12:
869 args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
870 break;
871 case 16:
872 args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
873 break;
874 }
875 if (hpd_id == RADEON_HPD_NONE)
876 args.v4.ucHPD_ID = 0;
877 else
878 args.v4.ucHPD_ID = hpd_id + 1;
879 } else if (ASIC_IS_DCE4(rdev)) {
880 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
881 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
882 args.v3.acConfig.ucDigSel = dig->dig_encoder;
883 switch (bpc) {
884 case 0:
885 args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
886 break;
887 case 6:
888 args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
889 break;
890 case 8:
891 default:
892 args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
893 break;
894 case 10:
895 args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
896 break;
897 case 12:
898 args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
899 break;
900 case 16:
901 args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
902 break;
903 }
904 } else {
905 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
906 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
907 switch (radeon_encoder->encoder_id) {
908 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
909 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
910 break;
911 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
912 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
913 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
914 break;
915 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
916 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
917 break;
918 }
919 if (dig->linkb)
920 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
921 else
922 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
923 }
924
925 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
926
927}
928
/* Parameter-block overlay for the UNIPHY/LVTMA TransmitterControl ATOM
 * command tables; the valid member is selected by the table's frev/crev.
 */
union dig_transmitter_control {
	DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
};
935
/*
 * atombios_dig_transmitter_setup - program a DIG transmitter (PHY) via the
 * UNIPHY/LVTMA TransmitterControl (or DVOOutputControl) ATOM table.
 *
 * @encoder:  DRM encoder whose transmitter is being programmed
 * @action:   ATOM_TRANSMITTER_ACTION_* (init, enable, disable, vs/emph, ...)
 * @lane_num: lane selector, used only for ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH
 * @lane_set: lane settings, used only for ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH
 *
 * Fills the v1/v2/v3 parameter block appropriate for the ASIC generation
 * (pre-DCE3.2 / DCE3.2 / DCE4+) with pixel or DP clock, lane count,
 * link/encoder/transmitter selection, PLL source and coherency flags,
 * then executes the table.  Returns silently if no DIG encoder is
 * assigned (except for INIT) or the command header cannot be parsed.
 */
void
atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	struct drm_connector *connector;
	union dig_transmitter_control args;
	int index = 0;
	uint8_t frev, crev;
	bool is_dp = false;
	int pll_id = 0;
	int dp_clock = 0;
	int dp_lane_count = 0;
	int connector_object_id = 0;
	int igp_lane_info = 0;
	int dig_encoder = dig->dig_encoder;

	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
		connector = radeon_get_connector_for_encoder_init(encoder);
		/* just needed to avoid bailing in the encoder check. the encoder
		 * isn't used for init
		 */
		dig_encoder = 0;
	} else
		connector = radeon_get_connector_for_encoder(encoder);

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		struct radeon_connector_atom_dig *dig_connector =
			radeon_connector->con_priv;

		dp_clock = dig_connector->dp_clock;
		dp_lane_count = dig_connector->dp_lane_count;
		connector_object_id =
			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
		igp_lane_info = dig_connector->igp_lane_info;
	}

	/* no dig encoder assigned */
	if (dig_encoder == -1)
		return;

	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
		is_dp = true;

	memset(&args, 0, sizeof(args));

	/* pick the command table for this transmitter type */
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
		break;
	}

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	args.v1.ucAction = action;
	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
		args.v1.usInitInfo = cpu_to_le16(connector_object_id);
	} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
		args.v1.asMode.ucLaneSel = lane_num;
		args.v1.asMode.ucLaneSet = lane_set;
	} else {
		if (is_dp)
			args.v1.usPixelClock =
				cpu_to_le16(dp_clock / 10);
		else if (radeon_encoder->pixel_clock > 165000)
			/* dual link: each link carries half the pixel clock */
			args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
		else
			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
	}
	if (ASIC_IS_DCE4(rdev)) {
		if (is_dp)
			args.v3.ucLaneNum = dp_lane_count;
		else if (radeon_encoder->pixel_clock > 165000)
			args.v3.ucLaneNum = 8;
		else
			args.v3.ucLaneNum = 4;

		if (dig->linkb)
			args.v3.acConfig.ucLinkSel = 1;
		if (dig_encoder & 1)
			args.v3.acConfig.ucEncoderSel = 1;

		/* Select the PLL for the PHY
		 * DP PHY should be clocked from external src if there is
		 * one.
		 */
		if (encoder->crtc) {
			struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
			pll_id = radeon_crtc->pll_id;
		}

		if (ASIC_IS_DCE5(rdev)) {
			/* On DCE5 DCPLL usually generates the DP ref clock */
			if (is_dp) {
				if (rdev->clock.dp_extclk)
					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
				else
					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
			} else
				args.v4.acConfig.ucRefClkSource = pll_id;
		} else {
			/* On DCE4, if there is an external clock, it generates the DP ref clock */
			if (is_dp && rdev->clock.dp_extclk)
				args.v3.acConfig.ucRefClkSource = 2; /* external src */
			else
				args.v3.acConfig.ucRefClkSource = pll_id;
		}

		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			args.v3.acConfig.ucTransmitterSel = 0;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
			args.v3.acConfig.ucTransmitterSel = 1;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
			args.v3.acConfig.ucTransmitterSel = 2;
			break;
		}

		if (is_dp)
			args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
			if (dig->coherent_mode)
				args.v3.acConfig.fCoherentMode = 1;
			if (radeon_encoder->pixel_clock > 165000)
				args.v3.acConfig.fDualLinkConnector = 1;
		}
	} else if (ASIC_IS_DCE32(rdev)) {
		args.v2.acConfig.ucEncoderSel = dig_encoder;
		if (dig->linkb)
			args.v2.acConfig.ucLinkSel = 1;

		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			args.v2.acConfig.ucTransmitterSel = 0;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
			args.v2.acConfig.ucTransmitterSel = 1;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
			args.v2.acConfig.ucTransmitterSel = 2;
			break;
		}

		if (is_dp) {
			args.v2.acConfig.fCoherentMode = 1;
			args.v2.acConfig.fDPConnector = 1;
		} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
			if (dig->coherent_mode)
				args.v2.acConfig.fCoherentMode = 1;
			if (radeon_encoder->pixel_clock > 165000)
				args.v2.acConfig.fDualLinkConnector = 1;
		}
	} else {
		/* pre-DCE3.2: DIG1/DIG2 flags plus (on IGP UNIPHY) lane mapping */
		args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;

		if (dig_encoder)
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
		else
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;

		if ((rdev->flags & RADEON_IS_IGP) &&
		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
			if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
				if (igp_lane_info & 0x1)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
				else if (igp_lane_info & 0x2)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
				else if (igp_lane_info & 0x4)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
				else if (igp_lane_info & 0x8)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
			} else {
				if (igp_lane_info & 0x3)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
				else if (igp_lane_info & 0xc)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
			}
		}

		if (dig->linkb)
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
		else
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;

		if (is_dp)
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
			if (dig->coherent_mode)
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
			if (radeon_encoder->pixel_clock > 165000)
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
		}
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
1146
1147bool
1148atombios_set_edp_panel_power(struct drm_connector *connector, int action)
1149{
1150 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1151 struct drm_device *dev = radeon_connector->base.dev;
1152 struct radeon_device *rdev = dev->dev_private;
1153 union dig_transmitter_control args;
1154 int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
1155 uint8_t frev, crev;
1156
1157 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
1158 goto done;
1159
1160 if (!ASIC_IS_DCE4(rdev))
1161 goto done;
1162
1163 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
1164 (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
1165 goto done;
1166
1167 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
1168 goto done;
1169
1170 memset(&args, 0, sizeof(args));
1171
1172 args.v1.ucAction = action;
1173
1174 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1175
1176 /* wait for the panel to power up */
1177 if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
1178 int i;
1179
1180 for (i = 0; i < 300; i++) {
1181 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
1182 return true;
1183 mdelay(1);
1184 }
1185 return false;
1186 }
1187done:
1188 return true;
1189}
1190
/* Parameter-block overlay for the ExternalEncoderControl ATOM command
 * table; v1 vs v3 is chosen from the table's frev/crev.
 */
union external_encoder_control {
	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
};
1195
/*
 * atombios_external_encoder_setup - program an external (bridge) encoder
 * through the ExternalEncoderControl ATOM command table.
 *
 * @encoder:     internal DRM encoder driving the external encoder
 * @ext_encoder: the external encoder object being configured
 * @action:      ATOM action code (ATOM_ENABLE/ATOM_DISABLE or one of the
 *               EXTERNAL_ENCODER_ACTION_V3_* codes on newer tables)
 *
 * Packs the frev/crev-appropriate parameter block (pixel clock, encoder
 * mode, DP lane count/link rate, encoder enum, bpc) and executes the
 * table.  frev 1 tables take no parameters at all.
 */
static void
atombios_external_encoder_setup(struct drm_encoder *encoder,
				struct drm_encoder *ext_encoder,
				int action)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
	union external_encoder_control args;
	struct drm_connector *connector;
	int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
	u8 frev, crev;
	int dp_clock = 0;
	int dp_lane_count = 0;
	int connector_object_id = 0;
	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
	int bpc = 8;

	if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
		connector = radeon_get_connector_for_encoder_init(encoder);
	else
		connector = radeon_get_connector_for_encoder(encoder);

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		struct radeon_connector_atom_dig *dig_connector =
			radeon_connector->con_priv;

		dp_clock = dig_connector->dp_clock;
		dp_lane_count = dig_connector->dp_lane_count;
		connector_object_id =
			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
		bpc = connector->display_info.bpc;
	}

	memset(&args, 0, sizeof(args));

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	switch (frev) {
	case 1:
		/* no params on frev 1 */
		break;
	case 2:
		switch (crev) {
		case 1:
		case 2:
			args.v1.sDigEncoder.ucAction = action;
			args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);

			if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
				if (dp_clock == 270000)
					args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
				args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
			} else if (radeon_encoder->pixel_clock > 165000)
				args.v1.sDigEncoder.ucLaneNum = 8;
			else
				args.v1.sDigEncoder.ucLaneNum = 4;
			break;
		case 3:
			args.v3.sExtEncoder.ucAction = action;
			/* INIT carries the connector id instead of a pixel clock */
			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
				args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
			else
				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);

			if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
				if (dp_clock == 270000)
					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
				else if (dp_clock == 540000)
					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
				args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
			} else if (radeon_encoder->pixel_clock > 165000)
				args.v3.sExtEncoder.ucLaneNum = 8;
			else
				args.v3.sExtEncoder.ucLaneNum = 4;
			/* which of the (up to 3) external encoders this is */
			switch (ext_enum) {
			case GRAPH_OBJECT_ENUM_ID1:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
				break;
			case GRAPH_OBJECT_ENUM_ID2:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
				break;
			case GRAPH_OBJECT_ENUM_ID3:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
				break;
			}
			switch (bpc) {
			case 0:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
				break;
			case 6:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
				break;
			case 8:
			default:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
				break;
			case 10:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
				break;
			case 12:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
				break;
			case 16:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
				break;
			}
			break;
		default:
			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
			return;
		}
		break;
	default:
		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
		return;
	}
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
1320
1321static void
1322atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
1323{
1324 struct drm_device *dev = encoder->dev;
1325 struct radeon_device *rdev = dev->dev_private;
1326 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1327 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1328 ENABLE_YUV_PS_ALLOCATION args;
1329 int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
1330 uint32_t temp, reg;
1331
1332 memset(&args, 0, sizeof(args));
1333
1334 if (rdev->family >= CHIP_R600)
1335 reg = R600_BIOS_3_SCRATCH;
1336 else
1337 reg = RADEON_BIOS_3_SCRATCH;
1338
1339 /* XXX: fix up scratch reg handling */
1340 temp = RREG32(reg);
1341 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
1342 WREG32(reg, (ATOM_S3_TV1_ACTIVE |
1343 (radeon_crtc->crtc_id << 18)));
1344 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
1345 WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
1346 else
1347 WREG32(reg, 0);
1348
1349 if (enable)
1350 args.ucEnable = ATOM_ENABLE;
1351 args.ucCRTC = radeon_crtc->crtc_id;
1352
1353 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1354
1355 WREG32(reg, temp);
1356}
1357
/*
 * radeon_atom_encoder_dpms - DPMS (power management) entry point for
 * ATOM-based encoders.
 *
 * @encoder: encoder to power up or down
 * @mode:    DRM_MODE_DPMS_ON/STANDBY/SUSPEND/OFF
 *
 * Dispatches on encoder type: DIG encoders go through the transmitter
 * setup path (with DP link training and eDP panel power sequencing),
 * DCE5 DAC/DVO use their dedicated setup helpers, and legacy encoders
 * run the matching *OutputControl ATOM table directly.  Any attached
 * external encoder is switched alongside, and the DPMS scratch
 * registers are updated at the end.
 */
static void
radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
	int index = 0;
	bool is_dig = false;
	bool is_dce5_dac = false;
	bool is_dce5_dvo = false;

	memset(&args, 0, sizeof(args));

	DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
		      radeon_encoder->encoder_id, mode, radeon_encoder->devices,
		      radeon_encoder->active_device);
	/* classify the encoder / pick the legacy output-control table */
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
		index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		is_dig = true;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		if (ASIC_IS_DCE5(rdev))
			is_dce5_dvo = true;
		else if (ASIC_IS_DCE3(rdev))
			is_dig = true;
		else
			index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
		index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
		if (ASIC_IS_DCE5(rdev))
			is_dce5_dac = true;
		else {
			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
				index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
			else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
				index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
			else
				index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
		}
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
		break;
	}

	if (is_dig) {
		switch (mode) {
		case DRM_MODE_DPMS_ON:
			/* some early dce3.2 boards have a bug in their transmitter control table */
			if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
			else
				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
			if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);

				/* eDP: power the panel before link training */
				if (connector &&
				    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
					struct radeon_connector *radeon_connector = to_radeon_connector(connector);
					struct radeon_connector_atom_dig *radeon_dig_connector =
						radeon_connector->con_priv;
					atombios_set_edp_panel_power(connector,
								     ATOM_TRANSMITTER_ACTION_POWER_ON);
					radeon_dig_connector->edp_on = true;
				}
				/* on DCE4+ blank video around link training */
				if (ASIC_IS_DCE4(rdev))
					atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
				radeon_dp_link_train(encoder, connector);
				if (ASIC_IS_DCE4(rdev))
					atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
			}
			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
			if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);

				if (ASIC_IS_DCE4(rdev))
					atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
				/* eDP: cut panel power after the link is down */
				if (connector &&
				    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
					struct radeon_connector *radeon_connector = to_radeon_connector(connector);
					struct radeon_connector_atom_dig *radeon_dig_connector =
						radeon_connector->con_priv;
					atombios_set_edp_panel_power(connector,
								     ATOM_TRANSMITTER_ACTION_POWER_OFF);
					radeon_dig_connector->edp_on = false;
				}
			}
			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
			break;
		}
	} else if (is_dce5_dac) {
		switch (mode) {
		case DRM_MODE_DPMS_ON:
			atombios_dac_setup(encoder, ATOM_ENABLE);
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			atombios_dac_setup(encoder, ATOM_DISABLE);
			break;
		}
	} else if (is_dce5_dvo) {
		switch (mode) {
		case DRM_MODE_DPMS_ON:
			atombios_dvo_setup(encoder, ATOM_ENABLE);
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			atombios_dvo_setup(encoder, ATOM_DISABLE);
			break;
		}
	} else {
		/* legacy encoders: run the *OutputControl table directly */
		switch (mode) {
		case DRM_MODE_DPMS_ON:
			args.ucAction = ATOM_ENABLE;
			/* workaround for DVOOutputControl on some RS690 systems */
			if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
				u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
				WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
				WREG32(RADEON_BIOS_3_SCRATCH, reg);
			} else
				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
				args.ucAction = ATOM_LCD_BLON;
				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
			}
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			args.ucAction = ATOM_DISABLE;
			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
				args.ucAction = ATOM_LCD_BLOFF;
				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
			}
			break;
		}
	}

	/* mirror the state change onto any external (bridge) encoder */
	if (ext_encoder) {
		switch (mode) {
		case DRM_MODE_DPMS_ON:
		default:
			if (ASIC_IS_DCE41(rdev)) {
				atombios_external_encoder_setup(encoder, ext_encoder,
								EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
				atombios_external_encoder_setup(encoder, ext_encoder,
								EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
			} else
				atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			if (ASIC_IS_DCE41(rdev)) {
				atombios_external_encoder_setup(encoder, ext_encoder,
								EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
				atombios_external_encoder_setup(encoder, ext_encoder,
								EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
			} else
				atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
			break;
		}
	}

	radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);

}
1565
/* Argument blob for the AtomBIOS SelectCRTC_Source command table; the
 * table revision parsed at run time decides which layout (v1 or v2)
 * is actually filled in and executed.
 */
union crtc_source_param {
    SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
    SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
};
1570
/*
 * atombios_set_encoder_crtc_source - route a CRTC to this encoder
 * @encoder: DRM encoder to program
 *
 * Executes the AtomBIOS SelectCRTC_Source command so the BIOS connects
 * the encoder to the CRTC it is currently attached to.  The argument
 * layout depends on the command-table revision reported by the BIOS:
 * v1 selects the target by ATOM device index, v2 by encoder id plus
 * encoder mode.  Afterwards the BIOS scratch registers are updated to
 * mirror the new routing.
 */
static void
atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
{
    struct drm_device *dev = encoder->dev;
    struct radeon_device *rdev = dev->dev_private;
    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
    struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
    union crtc_source_param args;
    int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
    uint8_t frev, crev;
    struct radeon_encoder_atom_dig *dig;

    memset(&args, 0, sizeof(args));

    /* bail silently if the BIOS does not expose this command table */
    if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
        return;

    switch (frev) {
    case 1:
        switch (crev) {
        case 1:
        default:
            if (ASIC_IS_AVIVO(rdev))
                args.v1.ucCRTC = radeon_crtc->crtc_id;
            else {
                if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
                    args.v1.ucCRTC = radeon_crtc->crtc_id;
                } else {
                    /* pre-AVIVO, non-DAC1: crtc id is carried shifted left by 2 */
                    args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
                }
            }
            /* v1: pick the ATOM device index matching this encoder object */
            switch (radeon_encoder->encoder_id) {
            case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
            case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
                args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
                break;
            case ENCODER_OBJECT_ID_INTERNAL_LVDS:
            case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
                /* LVTM block can back either the panel or a second DFP */
                if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
                    args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
                else
                    args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
                break;
            case ENCODER_OBJECT_ID_INTERNAL_DVO1:
            case ENCODER_OBJECT_ID_INTERNAL_DDI:
            case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
                args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
                break;
            case ENCODER_OBJECT_ID_INTERNAL_DAC1:
            case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
                /* DACs are shared: TV takes priority, then CV, else CRT */
                if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
                    args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
                else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
                    args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
                else
                    args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
                break;
            case ENCODER_OBJECT_ID_INTERNAL_DAC2:
            case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
                if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
                    args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
                else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
                    args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
                else
                    args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
                break;
            }
            break;
        case 2:
            args.v2.ucCRTC = radeon_crtc->crtc_id;
            if (radeon_encoder_is_dp_bridge(encoder)) {
                /* behind a DP bridge the mode follows the connector type */
                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);

                if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
                    args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
                else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
                    args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
                else
                    args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
            } else
                args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
            /* v2: pick the ASIC-internal encoder id */
            switch (radeon_encoder->encoder_id) {
            case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
            case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
            case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
            case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
                /* digital paths use the DIG block previously assigned
                 * by radeon_atom_pick_dig_encoder() */
                dig = radeon_encoder->enc_priv;
                switch (dig->dig_encoder) {
                case 0:
                    args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
                    break;
                case 1:
                    args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
                    break;
                case 2:
                    args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
                    break;
                case 3:
                    args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
                    break;
                case 4:
                    args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
                    break;
                case 5:
                    args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
                    break;
                }
                break;
            case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
                args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
                break;
            case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
                /* TV and CV both route through the TV encoder */
                if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
                    args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
                else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
                    args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
                else
                    args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
                break;
            case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
                if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
                    args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
                else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
                    args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
                else
                    args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
                break;
            }
            break;
        }
        break;
    default:
        DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
        return;
    }

    atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

    /* update scratch regs with new routing */
    radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
1712
/*
 * atombios_apply_encoder_quirks - board/chip specific fixups after a mode set
 * @encoder: DRM encoder being programmed
 * @mode:    the mode that was just set (flags are inspected for interlace)
 *
 * Applies two workarounds: disables panel truncation/dithering on a
 * specific MacBook LVDS panel, and re-programs the data-format register
 * (interleave enable) on AVIVO parts because the scaler setup clears it.
 */
static void
atombios_apply_encoder_quirks(struct drm_encoder *encoder,
                              struct drm_display_mode *mode)
{
    struct drm_device *dev = encoder->dev;
    struct radeon_device *rdev = dev->dev_private;
    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
    struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);

    /* Funky macbooks: matched by PCI device/subsystem ids */
    if ((dev->pdev->device == 0x71C5) &&
        (dev->pdev->subsystem_vendor == 0x106b) &&
        (dev->pdev->subsystem_device == 0x0080)) {
        if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
            uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);

            /* turn off truncation and spatial dithering for this panel */
            lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
            lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;

            WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
        }
    }

    /* set scaler clears this on some chips */
    if (ASIC_IS_AVIVO(rdev) &&
        (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
        if (ASIC_IS_DCE4(rdev)) {
            /* interleave enable tracks the interlace flag of the mode */
            if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
                       EVERGREEN_INTERLEAVE_EN);
            else
                WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
        } else {
            if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
                       AVIVO_D1MODE_INTERLEAVE_EN);
            else
                WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
        }
    }
}
1754
1755static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1756{
1757 struct drm_device *dev = encoder->dev;
1758 struct radeon_device *rdev = dev->dev_private;
1759 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1760 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1761 struct drm_encoder *test_encoder;
1762 struct radeon_encoder_atom_dig *dig;
1763 uint32_t dig_enc_in_use = 0;
1764
1765 /* DCE4/5 */
1766 if (ASIC_IS_DCE4(rdev)) {
1767 dig = radeon_encoder->enc_priv;
1768 if (ASIC_IS_DCE41(rdev)) {
1769 /* ontario follows DCE4 */
1770 if (rdev->family == CHIP_PALM) {
1771 if (dig->linkb)
1772 return 1;
1773 else
1774 return 0;
1775 } else
1776 /* llano follows DCE3.2 */
1777 return radeon_crtc->crtc_id;
1778 } else {
1779 switch (radeon_encoder->encoder_id) {
1780 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1781 if (dig->linkb)
1782 return 1;
1783 else
1784 return 0;
1785 break;
1786 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1787 if (dig->linkb)
1788 return 3;
1789 else
1790 return 2;
1791 break;
1792 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1793 if (dig->linkb)
1794 return 5;
1795 else
1796 return 4;
1797 break;
1798 }
1799 }
1800 }
1801
1802 /* on DCE32 and encoder can driver any block so just crtc id */
1803 if (ASIC_IS_DCE32(rdev)) {
1804 return radeon_crtc->crtc_id;
1805 }
1806
1807 /* on DCE3 - LVTMA can only be driven by DIGB */
1808 list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
1809 struct radeon_encoder *radeon_test_encoder;
1810
1811 if (encoder == test_encoder)
1812 continue;
1813
1814 if (!radeon_encoder_is_digital(test_encoder))
1815 continue;
1816
1817 radeon_test_encoder = to_radeon_encoder(test_encoder);
1818 dig = radeon_test_encoder->enc_priv;
1819
1820 if (dig->dig_encoder >= 0)
1821 dig_enc_in_use |= (1 << dig->dig_encoder);
1822 }
1823
1824 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
1825 if (dig_enc_in_use & 0x2)
1826 DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
1827 return 1;
1828 }
1829 if (!(dig_enc_in_use & 1))
1830 return 0;
1831 return 1;
1832}
1833
/* This only needs to be called once at startup */
/*
 * radeon_atom_encoder_init - one-time hardware init for all ATOM encoders
 * @rdev: radeon device
 *
 * Walks every registered encoder, issues the transmitter INIT action on
 * the digital (UNIPHY/LVTMA) ones, and on DCE4.1 also sends the external
 * DP-bridge chips their ENCODER_INIT action.
 */
void
radeon_atom_encoder_init(struct radeon_device *rdev)
{
    struct drm_device *dev = rdev->ddev;
    struct drm_encoder *encoder;

    list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);

        switch (radeon_encoder->encoder_id) {
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
            atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
            break;
        default:
            break;
        }

        /* external bridge chips only need init on DCE4.1 */
        if (ext_encoder && ASIC_IS_DCE41(rdev))
            atombios_external_encoder_setup(encoder, ext_encoder,
                                            EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
    }
}
1861
/*
 * radeon_atom_encoder_mode_set - program the encoder for a new mode
 * @encoder:       DRM encoder to program
 * @mode:          requested mode (unused here; the adjusted mode is used)
 * @adjusted_mode: mode after fixup; its clock becomes the pixel clock
 *
 * Records the pixel clock, configures YUV output for TV/CV on AVIVO
 * parts, then runs the encoder-family specific enable sequence.  The
 * ordering of the disable/setup/enable steps for the DIG paths is
 * hardware mandated.  Finally applies board quirks and, for HDMI,
 * enables and configures the HDMI block.
 */
static void
radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
                             struct drm_display_mode *mode,
                             struct drm_display_mode *adjusted_mode)
{
    struct drm_device *dev = encoder->dev;
    struct radeon_device *rdev = dev->dev_private;
    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
    struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);

    radeon_encoder->pixel_clock = adjusted_mode->clock;

    /* AVIVO (pre-DCE4): YUV output only while a TV/CV device is active */
    if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
        if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
            atombios_yuv_setup(encoder, true);
        else
            atombios_yuv_setup(encoder, false);
    }

    switch (radeon_encoder->encoder_id) {
    case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
    case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
    case ENCODER_OBJECT_ID_INTERNAL_LVDS:
    case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
        atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
        break;
    case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
    case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
    case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
    case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
        if (ASIC_IS_DCE4(rdev)) {
            /* disable the transmitter */
            atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
            /* setup and enable the encoder */
            atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);

            /* enable the transmitter */
            atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
        } else {
            /* disable the encoder and transmitter */
            atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
            atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);

            /* setup and enable the encoder and transmitter */
            atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
            atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
            atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
        }
        break;
    case ENCODER_OBJECT_ID_INTERNAL_DDI:
    case ENCODER_OBJECT_ID_INTERNAL_DVO1:
    case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
        atombios_dvo_setup(encoder, ATOM_ENABLE);
        break;
    case ENCODER_OBJECT_ID_INTERNAL_DAC1:
    case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
    case ENCODER_OBJECT_ID_INTERNAL_DAC2:
    case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
        atombios_dac_setup(encoder, ATOM_ENABLE);
        /* shared DAC: enable TV encoder only while TV/CV is the active use */
        if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
            if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
                atombios_tv_setup(encoder, ATOM_ENABLE);
            else
                atombios_tv_setup(encoder, ATOM_DISABLE);
        }
        break;
    }

    if (ext_encoder) {
        /* DCE4.1 uses the v3 bridge action, older parts the generic enable */
        if (ASIC_IS_DCE41(rdev))
            atombios_external_encoder_setup(encoder, ext_encoder,
                                            EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
        else
            atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
    }

    atombios_apply_encoder_quirks(encoder, adjusted_mode);

    if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
        r600_hdmi_enable(encoder);
        r600_hdmi_setmode(encoder, adjusted_mode);
    }
}
1945
/*
 * atombios_dac_load_detect - run the BIOS DAC load-detection command
 * @encoder:   DAC encoder to probe with
 * @connector: connector whose device bits select what to probe
 *
 * Fills in the DAC_LoadDetection arguments (DAC A or B, device id, and
 * the YPrPb misc flag for component outputs on crev >= 3) and executes
 * the table.  Returns true when the command was issued — the result must
 * then be read back from the BIOS scratch registers by the caller —
 * false when the encoder supports no analog device or the command table
 * is missing.
 */
static bool
atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
    struct drm_device *dev = encoder->dev;
    struct radeon_device *rdev = dev->dev_private;
    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
    struct radeon_connector *radeon_connector = to_radeon_connector(connector);

    if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
                                   ATOM_DEVICE_CV_SUPPORT |
                                   ATOM_DEVICE_CRT_SUPPORT)) {
        DAC_LOAD_DETECTION_PS_ALLOCATION args;
        int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
        uint8_t frev, crev;

        memset(&args, 0, sizeof(args));

        if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
            return false;

        args.sDacload.ucMisc = 0;

        /* DAC1 objects probe via DAC A, everything else via DAC B */
        if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
            (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
            args.sDacload.ucDacType = ATOM_DAC_A;
        else
            args.sDacload.ucDacType = ATOM_DAC_B;

        /* pick the first supported analog device: CRT1, CRT2, CV, TV1 */
        if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
            args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
        else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
            args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
        else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
            args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
            /* newer tables can flag component (YPrPb) detection */
            if (crev >= 3)
                args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
        } else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
            args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
            if (crev >= 3)
                args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
        }

        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

        return true;
    } else
        return false;
}
1994
1995static enum drm_connector_status
1996radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
1997{
1998 struct drm_device *dev = encoder->dev;
1999 struct radeon_device *rdev = dev->dev_private;
2000 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2001 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
2002 uint32_t bios_0_scratch;
2003
2004 if (!atombios_dac_load_detect(encoder, connector)) {
2005 DRM_DEBUG_KMS("detect returned false \n");
2006 return connector_status_unknown;
2007 }
2008
2009 if (rdev->family >= CHIP_R600)
2010 bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
2011 else
2012 bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
2013
2014 DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
2015 if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
2016 if (bios_0_scratch & ATOM_S0_CRT1_MASK)
2017 return connector_status_connected;
2018 }
2019 if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
2020 if (bios_0_scratch & ATOM_S0_CRT2_MASK)
2021 return connector_status_connected;
2022 }
2023 if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
2024 if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
2025 return connector_status_connected;
2026 }
2027 if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
2028 if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
2029 return connector_status_connected; /* CTV */
2030 else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
2031 return connector_status_connected; /* STV */
2032 }
2033 return connector_status_disconnected;
2034}
2035
/*
 * radeon_atom_dig_detect - analog load detection through a DP bridge
 * @encoder:   digital encoder paired with an external DP bridge
 * @connector: connector being probed (must carry CRT device bits)
 *
 * Only meaningful on DCE4+ boards with an external bridge chip: asks
 * the bridge to perform DAC load detection, then decodes the BIOS_0
 * scratch register exactly like radeon_atom_dac_detect().  Returns
 * unknown when the preconditions are not met.
 */
static enum drm_connector_status
radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
    struct drm_device *dev = encoder->dev;
    struct radeon_device *rdev = dev->dev_private;
    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
    struct radeon_connector *radeon_connector = to_radeon_connector(connector);
    struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
    u32 bios_0_scratch;

    if (!ASIC_IS_DCE4(rdev))
        return connector_status_unknown;

    if (!ext_encoder)
        return connector_status_unknown;

    /* bridge load detect only makes sense for analog (CRT) devices */
    if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
        return connector_status_unknown;

    /* load detect on the dp bridge */
    atombios_external_encoder_setup(encoder, ext_encoder,
                                    EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);

    bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);

    DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
    if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
        if (bios_0_scratch & ATOM_S0_CRT1_MASK)
            return connector_status_connected;
    }
    if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
        if (bios_0_scratch & ATOM_S0_CRT2_MASK)
            return connector_status_connected;
    }
    if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
        if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
            return connector_status_connected;
    }
    if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
        if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
            return connector_status_connected; /* CTV */
        else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
            return connector_status_connected; /* STV */
    }
    return connector_status_disconnected;
}
2082
2083void
2084radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
2085{
2086 struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
2087
2088 if (ext_encoder)
2089 /* ddc_setup on the dp bridge */
2090 atombios_external_encoder_setup(encoder, ext_encoder,
2091 EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
2092
2093}
2094
/*
 * radeon_atom_encoder_prepare - pre-modeset encoder setup
 * @encoder: DRM encoder about to be reprogrammed
 *
 * Assigns a DIG block to digital encoders, locks the output and turns
 * the encoder off for the duration of the mode set, selects the DDC
 * router port and powers an eDP panel on if needed, and finally routes
 * the CRTC source (required for correct pll/ss setup on some parts).
 */
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
    struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);

    if ((radeon_encoder->active_device &
         (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
        radeon_encoder_is_dp_bridge(encoder)) {
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        if (dig)
            dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
    }

    /* lock and power down; commit() releases the lock and powers up */
    radeon_atom_output_lock(encoder, true);
    radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

    if (connector) {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);

        /* select the clock/data port if it uses a router */
        if (radeon_connector->router.cd_valid)
            radeon_router_select_cd_port(radeon_connector);

        /* turn eDP panel on for mode set */
        if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
            atombios_set_edp_panel_power(connector,
                                         ATOM_TRANSMITTER_ACTION_POWER_ON);
    }

    /* this is needed for the pll/ss setup to work correctly in some cases */
    atombios_set_encoder_crtc_source(encoder);
}
2127
/*
 * radeon_atom_encoder_commit - re-enable the encoder after a mode set
 *
 * Counterpart to radeon_atom_encoder_prepare(): powers the encoder back
 * on and releases the output lock taken there.
 */
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
{
    radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
    radeon_atom_output_lock(encoder, false);
}
2133
2134static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
2135{
2136 struct drm_device *dev = encoder->dev;
2137 struct radeon_device *rdev = dev->dev_private;
2138 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2139 struct radeon_encoder_atom_dig *dig;
2140
2141 /* check for pre-DCE3 cards with shared encoders;
2142 * can't really use the links individually, so don't disable
2143 * the encoder if it's in use by another connector
2144 */
2145 if (!ASIC_IS_DCE3(rdev)) {
2146 struct drm_encoder *other_encoder;
2147 struct radeon_encoder *other_radeon_encoder;
2148
2149 list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
2150 other_radeon_encoder = to_radeon_encoder(other_encoder);
2151 if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
2152 drm_helper_encoder_in_use(other_encoder))
2153 goto disable_done;
2154 }
2155 }
2156
2157 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2158
2159 switch (radeon_encoder->encoder_id) {
2160 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
2161 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
2162 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
2163 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
2164 atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
2165 break;
2166 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2167 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2168 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2169 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
2170 if (ASIC_IS_DCE4(rdev))
2171 /* disable the transmitter */
2172 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
2173 else {
2174 /* disable the encoder and transmitter */
2175 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
2176 atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
2177 }
2178 break;
2179 case ENCODER_OBJECT_ID_INTERNAL_DDI:
2180 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
2181 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
2182 atombios_dvo_setup(encoder, ATOM_DISABLE);
2183 break;
2184 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
2185 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
2186 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
2187 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
2188 atombios_dac_setup(encoder, ATOM_DISABLE);
2189 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
2190 atombios_tv_setup(encoder, ATOM_DISABLE);
2191 break;
2192 }
2193
2194disable_done:
2195 if (radeon_encoder_is_digital(encoder)) {
2196 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
2197 r600_hdmi_disable(encoder);
2198 dig = radeon_encoder->enc_priv;
2199 dig->dig_encoder = -1;
2200 }
2201 radeon_encoder->active_device = 0;
2202}
2203
/* these are handled by the primary encoders */
/* External bridge encoders (SI170B, TRAVIS, NUTMEG, ...) are programmed
 * through their primary internal encoder, so the DRM helper callbacks
 * below are intentionally empty; mode_fixup accepts every mode as-is.
 */
static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
{

}

static void radeon_atom_ext_commit(struct drm_encoder *encoder)
{

}

static void
radeon_atom_ext_mode_set(struct drm_encoder *encoder,
                         struct drm_display_mode *mode,
                         struct drm_display_mode *adjusted_mode)
{

}

static void radeon_atom_ext_disable(struct drm_encoder *encoder)
{

}

static void
radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
{

}

/* accept any mode unchanged; the primary encoder does the real fixup */
static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
                                       struct drm_display_mode *mode,
                                       struct drm_display_mode *adjusted_mode)
{
    return true;
}
2240
/* helper vtable for external bridge encoders: all stubs, since the
 * primary encoder performs the actual programming */
static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
    .dpms = radeon_atom_ext_dpms,
    .mode_fixup = radeon_atom_ext_mode_fixup,
    .prepare = radeon_atom_ext_prepare,
    .mode_set = radeon_atom_ext_mode_set,
    .commit = radeon_atom_ext_commit,
    .disable = radeon_atom_ext_disable,
    /* no detect for TMDS/LVDS yet */
};

/* helper vtable for internal digital encoders (TMDS/LVDS/UNIPHY) */
static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
    .dpms = radeon_atom_encoder_dpms,
    .mode_fixup = radeon_atom_mode_fixup,
    .prepare = radeon_atom_encoder_prepare,
    .mode_set = radeon_atom_encoder_mode_set,
    .commit = radeon_atom_encoder_commit,
    .disable = radeon_atom_encoder_disable,
    .detect = radeon_atom_dig_detect,
};

/* helper vtable for analog DAC encoders; uses DAC load detection */
static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
    .dpms = radeon_atom_encoder_dpms,
    .mode_fixup = radeon_atom_mode_fixup,
    .prepare = radeon_atom_encoder_prepare,
    .mode_set = radeon_atom_encoder_mode_set,
    .commit = radeon_atom_encoder_commit,
    .detect = radeon_atom_dac_detect,
};
2269
/*
 * radeon_enc_destroy - tear down a radeon encoder
 * @encoder: DRM encoder to destroy
 *
 * Frees the encoder private data, unregisters the DRM encoder and
 * releases the wrapper structure itself.
 */
void radeon_enc_destroy(struct drm_encoder *encoder)
{
    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
    kfree(radeon_encoder->enc_priv);
    drm_encoder_cleanup(encoder);
    kfree(radeon_encoder);
}

/* base funcs shared by every ATOM encoder; only destroy is required */
static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
    .destroy = radeon_enc_destroy,
};
2281
2282struct radeon_encoder_atom_dac *
2283radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
2284{
2285 struct drm_device *dev = radeon_encoder->base.dev;
2286 struct radeon_device *rdev = dev->dev_private;
2287 struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
2288
2289 if (!dac)
2290 return NULL;
2291
2292 dac->tv_std = radeon_atombios_get_tv_info(rdev);
2293 return dac;
2294}
2295
2296struct radeon_encoder_atom_dig *
2297radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
2298{
2299 int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
2300 struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
2301
2302 if (!dig)
2303 return NULL;
2304
2305 /* coherent mode by default */
2306 dig->coherent_mode = true;
2307 dig->dig_encoder = -1;
2308
2309 if (encoder_enum == 2)
2310 dig->linkb = true;
2311 else
2312 dig->linkb = false;
2313
2314 return dig;
2315}
2316
/*
 * radeon_add_atom_encoder - register an encoder parsed from the BIOS tables
 * @dev:              DRM device
 * @encoder_enum:     ATOM encoder enum (object id + enum id)
 * @supported_device: bitmask of ATOM devices this encoder can drive
 * @caps:             encoder capability flags
 *
 * If an encoder with the same enum already exists its device mask is
 * merged and nothing else is done.  Otherwise a radeon_encoder is
 * allocated, its possible-CRTC mask derived from the number of CRTCs,
 * and the DRM encoder type, private data and helper vtable are chosen
 * from the encoder object id and the supported device bits.
 */
void
radeon_add_atom_encoder(struct drm_device *dev,
                        uint32_t encoder_enum,
                        uint32_t supported_device,
                        u16 caps)
{
    struct radeon_device *rdev = dev->dev_private;
    struct drm_encoder *encoder;
    struct radeon_encoder *radeon_encoder;

    /* see if we already added it */
    list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
        radeon_encoder = to_radeon_encoder(encoder);
        if (radeon_encoder->encoder_enum == encoder_enum) {
            radeon_encoder->devices |= supported_device;
            return;
        }

    }

    /* add a new one */
    radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
    if (!radeon_encoder)
        return;

    encoder = &radeon_encoder->base;
    /* one bit per CRTC the encoder may be routed to */
    switch (rdev->num_crtc) {
    case 1:
        encoder->possible_crtcs = 0x1;
        break;
    case 2:
    default:
        encoder->possible_crtcs = 0x3;
        break;
    case 4:
        encoder->possible_crtcs = 0xf;
        break;
    case 6:
        encoder->possible_crtcs = 0x3f;
        break;
    }

    radeon_encoder->enc_priv = NULL;

    radeon_encoder->encoder_enum = encoder_enum;
    radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
    radeon_encoder->devices = supported_device;
    radeon_encoder->rmx_type = RMX_OFF;
    radeon_encoder->underscan_type = UNDERSCAN_OFF;
    radeon_encoder->is_ext_encoder = false;
    radeon_encoder->caps = caps;

    switch (radeon_encoder->encoder_id) {
    case ENCODER_OBJECT_ID_INTERNAL_LVDS:
    case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
    case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
    case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
        /* panel outputs get full RMX scaling and LVDS panel info */
        if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
            radeon_encoder->rmx_type = RMX_FULL;
            drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
            radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
        } else {
            drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
            radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
        }
        drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
        break;
    case ENCODER_OBJECT_ID_INTERNAL_DAC1:
        /* primary DAC: plain CRT */
        drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
        radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
        drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
        break;
    case ENCODER_OBJECT_ID_INTERNAL_DAC2:
    case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
    case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
        /* DACs that can also drive TV out */
        drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
        radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
        drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
        break;
    case ENCODER_OBJECT_ID_INTERNAL_DVO1:
    case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
    case ENCODER_OBJECT_ID_INTERNAL_DDI:
    case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
    case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
    case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
    case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
        /* digital blocks: encoder type follows the supported device bits */
        if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
            radeon_encoder->rmx_type = RMX_FULL;
            drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
            radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
        } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
            drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
            radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
        } else {
            drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
            radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
        }
        drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
        break;
    case ENCODER_OBJECT_ID_SI170B:
    case ENCODER_OBJECT_ID_CH7303:
    case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
    case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
    case ENCODER_OBJECT_ID_TITFP513:
    case ENCODER_OBJECT_ID_VT1623:
    case ENCODER_OBJECT_ID_HDMI_SI1930:
    case ENCODER_OBJECT_ID_TRAVIS:
    case ENCODER_OBJECT_ID_NUTMEG:
        /* these are handled by the primary encoders */
        radeon_encoder->is_ext_encoder = true;
        if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
            drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
        else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
            drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
        else
            drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
        break;
    }
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index fdc3a9a54bf8..ba7ab79e12c1 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -49,27 +49,27 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
49 rdev->gart.table_size >> PAGE_SHIFT); 49 rdev->gart.table_size >> PAGE_SHIFT);
50 } 50 }
51#endif 51#endif
52 rdev->gart.table.ram.ptr = ptr; 52 rdev->gart.ptr = ptr;
53 memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size); 53 memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
54 return 0; 54 return 0;
55} 55}
56 56
57void radeon_gart_table_ram_free(struct radeon_device *rdev) 57void radeon_gart_table_ram_free(struct radeon_device *rdev)
58{ 58{
59 if (rdev->gart.table.ram.ptr == NULL) { 59 if (rdev->gart.ptr == NULL) {
60 return; 60 return;
61 } 61 }
62#ifdef CONFIG_X86 62#ifdef CONFIG_X86
63 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 || 63 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
64 rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { 64 rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
65 set_memory_wb((unsigned long)rdev->gart.table.ram.ptr, 65 set_memory_wb((unsigned long)rdev->gart.ptr,
66 rdev->gart.table_size >> PAGE_SHIFT); 66 rdev->gart.table_size >> PAGE_SHIFT);
67 } 67 }
68#endif 68#endif
69 pci_free_consistent(rdev->pdev, rdev->gart.table_size, 69 pci_free_consistent(rdev->pdev, rdev->gart.table_size,
70 (void *)rdev->gart.table.ram.ptr, 70 (void *)rdev->gart.ptr,
71 rdev->gart.table_addr); 71 rdev->gart.table_addr);
72 rdev->gart.table.ram.ptr = NULL; 72 rdev->gart.ptr = NULL;
73 rdev->gart.table_addr = 0; 73 rdev->gart.table_addr = 0;
74} 74}
75 75
@@ -77,10 +77,10 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
77{ 77{
78 int r; 78 int r;
79 79
80 if (rdev->gart.table.vram.robj == NULL) { 80 if (rdev->gart.robj == NULL) {
81 r = radeon_bo_create(rdev, rdev->gart.table_size, 81 r = radeon_bo_create(rdev, rdev->gart.table_size,
82 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 82 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
83 &rdev->gart.table.vram.robj); 83 &rdev->gart.robj);
84 if (r) { 84 if (r) {
85 return r; 85 return r;
86 } 86 }
@@ -93,38 +93,46 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
93 uint64_t gpu_addr; 93 uint64_t gpu_addr;
94 int r; 94 int r;
95 95
96 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); 96 r = radeon_bo_reserve(rdev->gart.robj, false);
97 if (unlikely(r != 0)) 97 if (unlikely(r != 0))
98 return r; 98 return r;
99 r = radeon_bo_pin(rdev->gart.table.vram.robj, 99 r = radeon_bo_pin(rdev->gart.robj,
100 RADEON_GEM_DOMAIN_VRAM, &gpu_addr); 100 RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
101 if (r) { 101 if (r) {
102 radeon_bo_unreserve(rdev->gart.table.vram.robj); 102 radeon_bo_unreserve(rdev->gart.robj);
103 return r; 103 return r;
104 } 104 }
105 r = radeon_bo_kmap(rdev->gart.table.vram.robj, 105 r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
106 (void **)&rdev->gart.table.vram.ptr);
107 if (r) 106 if (r)
108 radeon_bo_unpin(rdev->gart.table.vram.robj); 107 radeon_bo_unpin(rdev->gart.robj);
109 radeon_bo_unreserve(rdev->gart.table.vram.robj); 108 radeon_bo_unreserve(rdev->gart.robj);
110 rdev->gart.table_addr = gpu_addr; 109 rdev->gart.table_addr = gpu_addr;
111 return r; 110 return r;
112} 111}
113 112
114void radeon_gart_table_vram_free(struct radeon_device *rdev) 113void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
115{ 114{
116 int r; 115 int r;
117 116
118 if (rdev->gart.table.vram.robj == NULL) { 117 if (rdev->gart.robj == NULL) {
119 return; 118 return;
120 } 119 }
121 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); 120 r = radeon_bo_reserve(rdev->gart.robj, false);
122 if (likely(r == 0)) { 121 if (likely(r == 0)) {
123 radeon_bo_kunmap(rdev->gart.table.vram.robj); 122 radeon_bo_kunmap(rdev->gart.robj);
124 radeon_bo_unpin(rdev->gart.table.vram.robj); 123 radeon_bo_unpin(rdev->gart.robj);
125 radeon_bo_unreserve(rdev->gart.table.vram.robj); 124 radeon_bo_unreserve(rdev->gart.robj);
125 rdev->gart.ptr = NULL;
126 } 126 }
127 radeon_bo_unref(&rdev->gart.table.vram.robj); 127}
128
129void radeon_gart_table_vram_free(struct radeon_device *rdev)
130{
131 if (rdev->gart.robj == NULL) {
132 return;
133 }
134 radeon_gart_table_vram_unpin(rdev);
135 radeon_bo_unref(&rdev->gart.robj);
128} 136}
129 137
130 138
@@ -151,12 +159,14 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
151 if (rdev->gart.pages[p]) { 159 if (rdev->gart.pages[p]) {
152 if (!rdev->gart.ttm_alloced[p]) 160 if (!rdev->gart.ttm_alloced[p])
153 pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], 161 pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
154 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 162 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
155 rdev->gart.pages[p] = NULL; 163 rdev->gart.pages[p] = NULL;
156 rdev->gart.pages_addr[p] = rdev->dummy_page.addr; 164 rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
157 page_base = rdev->gart.pages_addr[p]; 165 page_base = rdev->gart.pages_addr[p];
158 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 166 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
159 radeon_gart_set_page(rdev, t, page_base); 167 if (rdev->gart.ptr) {
168 radeon_gart_set_page(rdev, t, page_base);
169 }
160 page_base += RADEON_GPU_PAGE_SIZE; 170 page_base += RADEON_GPU_PAGE_SIZE;
161 } 171 }
162 } 172 }
@@ -199,10 +209,12 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
199 } 209 }
200 } 210 }
201 rdev->gart.pages[p] = pagelist[i]; 211 rdev->gart.pages[p] = pagelist[i];
202 page_base = rdev->gart.pages_addr[p]; 212 if (rdev->gart.ptr) {
203 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 213 page_base = rdev->gart.pages_addr[p];
204 radeon_gart_set_page(rdev, t, page_base); 214 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
205 page_base += RADEON_GPU_PAGE_SIZE; 215 radeon_gart_set_page(rdev, t, page_base);
216 page_base += RADEON_GPU_PAGE_SIZE;
217 }
206 } 218 }
207 } 219 }
208 mb(); 220 mb();
@@ -215,6 +227,9 @@ void radeon_gart_restore(struct radeon_device *rdev)
215 int i, j, t; 227 int i, j, t;
216 u64 page_base; 228 u64 page_base;
217 229
230 if (!rdev->gart.ptr) {
231 return;
232 }
218 for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) { 233 for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
219 page_base = rdev->gart.pages_addr[i]; 234 page_base = rdev->gart.pages_addr[i];
220 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 235 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index e6d110ce2331..7bb1b079f480 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -34,7 +34,7 @@
34 * radeon_ddc_probe 34 * radeon_ddc_probe
35 * 35 *
36 */ 36 */
37bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe) 37bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
38{ 38{
39 u8 out = 0x0; 39 u8 out = 0x0;
40 u8 buf[8]; 40 u8 buf[8];
@@ -49,15 +49,11 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_e
49 { 49 {
50 .addr = 0x50, 50 .addr = 0x50,
51 .flags = I2C_M_RD, 51 .flags = I2C_M_RD,
52 .len = 1, 52 .len = 8,
53 .buf = buf, 53 .buf = buf,
54 } 54 }
55 }; 55 };
56 56
57 /* Read 8 bytes from i2c for extended probe of EDID header */
58 if (requires_extended_probe)
59 msgs[1].len = 8;
60
61 /* on hw with routers, select right port */ 57 /* on hw with routers, select right port */
62 if (radeon_connector->router.ddc_valid) 58 if (radeon_connector->router.ddc_valid)
63 radeon_router_select_ddc_port(radeon_connector); 59 radeon_router_select_ddc_port(radeon_connector);
@@ -66,17 +62,15 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_e
66 if (ret != 2) 62 if (ret != 2)
67 /* Couldn't find an accessible DDC on this connector */ 63 /* Couldn't find an accessible DDC on this connector */
68 return false; 64 return false;
69 if (requires_extended_probe) { 65 /* Probe also for valid EDID header
70 /* Probe also for valid EDID header 66 * EDID header starts with:
71 * EDID header starts with: 67 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
72 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00. 68 * Only the first 6 bytes must be valid as
73 * Only the first 6 bytes must be valid as 69 * drm_edid_block_valid() can fix the last 2 bytes */
74 * drm_edid_block_valid() can fix the last 2 bytes */ 70 if (drm_edid_header_is_valid(buf) < 6) {
75 if (drm_edid_header_is_valid(buf) < 6) { 71 /* Couldn't find an accessible EDID on this
76 /* Couldn't find an accessible EDID on this 72 * connector */
77 * connector */ 73 return false;
78 return false;
79 }
80 } 74 }
81 return true; 75 return true;
82} 76}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 9ec830c77af0..8f86aeb26693 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -67,10 +67,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
67 /* Disable *all* interrupts */ 67 /* Disable *all* interrupts */
68 rdev->irq.sw_int = false; 68 rdev->irq.sw_int = false;
69 rdev->irq.gui_idle = false; 69 rdev->irq.gui_idle = false;
70 for (i = 0; i < rdev->num_crtc; i++) 70 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
71 rdev->irq.crtc_vblank_int[i] = false;
72 for (i = 0; i < 6; i++) {
73 rdev->irq.hpd[i] = false; 71 rdev->irq.hpd[i] = false;
72 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
73 rdev->irq.crtc_vblank_int[i] = false;
74 rdev->irq.pflip[i] = false; 74 rdev->irq.pflip[i] = false;
75 } 75 }
76 radeon_irq_set(rdev); 76 radeon_irq_set(rdev);
@@ -99,15 +99,55 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
99 /* Disable *all* interrupts */ 99 /* Disable *all* interrupts */
100 rdev->irq.sw_int = false; 100 rdev->irq.sw_int = false;
101 rdev->irq.gui_idle = false; 101 rdev->irq.gui_idle = false;
102 for (i = 0; i < rdev->num_crtc; i++) 102 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
103 rdev->irq.crtc_vblank_int[i] = false;
104 for (i = 0; i < 6; i++) {
105 rdev->irq.hpd[i] = false; 103 rdev->irq.hpd[i] = false;
104 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
105 rdev->irq.crtc_vblank_int[i] = false;
106 rdev->irq.pflip[i] = false; 106 rdev->irq.pflip[i] = false;
107 } 107 }
108 radeon_irq_set(rdev); 108 radeon_irq_set(rdev);
109} 109}
110 110
111static bool radeon_msi_ok(struct radeon_device *rdev)
112{
113 /* RV370/RV380 was first asic with MSI support */
114 if (rdev->family < CHIP_RV380)
115 return false;
116
117 /* MSIs don't work on AGP */
118 if (rdev->flags & RADEON_IS_AGP)
119 return false;
120
121 /* force MSI on */
122 if (radeon_msi == 1)
123 return true;
124 else if (radeon_msi == 0)
125 return false;
126
127 /* Quirks */
128 /* HP RS690 only seems to work with MSIs. */
129 if ((rdev->pdev->device == 0x791f) &&
130 (rdev->pdev->subsystem_vendor == 0x103c) &&
131 (rdev->pdev->subsystem_device == 0x30c2))
132 return true;
133
134 /* Dell RS690 only seems to work with MSIs. */
135 if ((rdev->pdev->device == 0x791f) &&
136 (rdev->pdev->subsystem_vendor == 0x1028) &&
137 (rdev->pdev->subsystem_device == 0x01fd))
138 return true;
139
140 if (rdev->flags & RADEON_IS_IGP) {
141 /* APUs work fine with MSIs */
142 if (rdev->family >= CHIP_PALM)
143 return true;
144 /* lots of IGPs have problems with MSIs */
145 return false;
146 }
147
148 return true;
149}
150
111int radeon_irq_kms_init(struct radeon_device *rdev) 151int radeon_irq_kms_init(struct radeon_device *rdev)
112{ 152{
113 int i; 153 int i;
@@ -124,12 +164,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
124 } 164 }
125 /* enable msi */ 165 /* enable msi */
126 rdev->msi_enabled = 0; 166 rdev->msi_enabled = 0;
127 /* MSIs don't seem to work reliably on all IGP 167
128 * chips. Disable MSI on them for now. 168 if (radeon_msi_ok(rdev)) {
129 */
130 if ((rdev->family >= CHIP_RV380) &&
131 ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) &&
132 (!(rdev->flags & RADEON_IS_AGP))) {
133 int ret = pci_enable_msi(rdev->pdev); 169 int ret = pci_enable_msi(rdev->pdev);
134 if (!ret) { 170 if (!ret) {
135 rdev->msi_enabled = 1; 171 rdev->msi_enabled = 1;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 41a5d48e657b..daadf2111040 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -991,12 +991,6 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
991 struct drm_display_mode *mode, 991 struct drm_display_mode *mode,
992 struct drm_display_mode *adjusted_mode) 992 struct drm_display_mode *adjusted_mode)
993{ 993{
994 struct drm_device *dev = crtc->dev;
995 struct radeon_device *rdev = dev->dev_private;
996
997 /* adjust pm to upcoming mode change */
998 radeon_pm_compute_clocks(rdev);
999
1000 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 994 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
1001 return false; 995 return false;
1002 return true; 996 return true;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ed0178f03235..2c2e75ef8a37 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -438,9 +438,6 @@ struct radeon_connector {
438 struct radeon_i2c_chan *ddc_bus; 438 struct radeon_i2c_chan *ddc_bus;
439 /* some systems have an hdmi and vga port with a shared ddc line */ 439 /* some systems have an hdmi and vga port with a shared ddc line */
440 bool shared_ddc; 440 bool shared_ddc;
441 /* for some Radeon chip families we apply an additional EDID header
442 check as part of the DDC probe */
443 bool requires_extended_probe;
444 bool use_digital; 441 bool use_digital;
445 /* we need to mind the EDID between detect 442 /* we need to mind the EDID between detect
446 and get modes due to analog/digital/tvencoder */ 443 and get modes due to analog/digital/tvencoder */
@@ -459,6 +456,8 @@ struct radeon_framebuffer {
459 struct drm_gem_object *obj; 456 struct drm_gem_object *obj;
460}; 457};
461 458
459#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
460 ((em) == ATOM_ENCODER_MODE_DP_MST))
462 461
463extern enum radeon_tv_std 462extern enum radeon_tv_std
464radeon_combios_get_tv_info(struct radeon_device *rdev); 463radeon_combios_get_tv_info(struct radeon_device *rdev);
@@ -468,8 +467,8 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev);
468extern struct drm_connector * 467extern struct drm_connector *
469radeon_get_connector_for_encoder(struct drm_encoder *encoder); 468radeon_get_connector_for_encoder(struct drm_encoder *encoder);
470 469
471extern bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder); 470extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
472extern bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector); 471extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
473extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector); 472extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
474extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector); 473extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
475 474
@@ -489,7 +488,7 @@ extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
489 int action, uint8_t lane_num, 488 int action, uint8_t lane_num,
490 uint8_t lane_set); 489 uint8_t lane_set);
491extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder); 490extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
492extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder); 491extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
493extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 492extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
494 u8 write_byte, u8 *read_byte); 493 u8 write_byte, u8 *read_byte);
495 494
@@ -519,8 +518,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
519 u8 val); 518 u8 val);
520extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); 519extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
521extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); 520extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
522extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, 521extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
523 bool requires_extended_probe);
524extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); 522extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
525 523
526extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); 524extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6fabe89fa6a1..78a665bd9519 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -53,6 +53,24 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev);
53 53
54#define ACPI_AC_CLASS "ac_adapter" 54#define ACPI_AC_CLASS "ac_adapter"
55 55
56int radeon_pm_get_type_index(struct radeon_device *rdev,
57 enum radeon_pm_state_type ps_type,
58 int instance)
59{
60 int i;
61 int found_instance = -1;
62
63 for (i = 0; i < rdev->pm.num_power_states; i++) {
64 if (rdev->pm.power_state[i].type == ps_type) {
65 found_instance++;
66 if (found_instance == instance)
67 return i;
68 }
69 }
70 /* return default if no match */
71 return rdev->pm.default_power_state_index;
72}
73
56#ifdef CONFIG_ACPI 74#ifdef CONFIG_ACPI
57static int radeon_acpi_event(struct notifier_block *nb, 75static int radeon_acpi_event(struct notifier_block *nb,
58 unsigned long val, 76 unsigned long val,
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 89a6e1ecea8d..06b90c87f8f3 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -77,7 +77,7 @@ int rs400_gart_init(struct radeon_device *rdev)
77{ 77{
78 int r; 78 int r;
79 79
80 if (rdev->gart.table.ram.ptr) { 80 if (rdev->gart.ptr) {
81 WARN(1, "RS400 GART already initialized\n"); 81 WARN(1, "RS400 GART already initialized\n");
82 return 0; 82 return 0;
83 } 83 }
@@ -212,6 +212,7 @@ void rs400_gart_fini(struct radeon_device *rdev)
212int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 212int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
213{ 213{
214 uint32_t entry; 214 uint32_t entry;
215 u32 *gtt = rdev->gart.ptr;
215 216
216 if (i < 0 || i > rdev->gart.num_gpu_pages) { 217 if (i < 0 || i > rdev->gart.num_gpu_pages) {
217 return -EINVAL; 218 return -EINVAL;
@@ -221,7 +222,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
221 ((upper_32_bits(addr) & 0xff) << 4) | 222 ((upper_32_bits(addr) & 0xff) << 4) |
222 RS400_PTE_WRITEABLE | RS400_PTE_READABLE; 223 RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
223 entry = cpu_to_le32(entry); 224 entry = cpu_to_le32(entry);
224 rdev->gart.table.ram.ptr[i] = entry; 225 gtt[i] = entry;
225 return 0; 226 return 0;
226} 227}
227 228
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 9320dd6404f6..481b99e89f65 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -287,6 +287,7 @@ void rs600_hpd_init(struct radeon_device *rdev)
287 default: 287 default:
288 break; 288 break;
289 } 289 }
290 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
290 } 291 }
291 if (rdev->irq.installed) 292 if (rdev->irq.installed)
292 rs600_irq_set(rdev); 293 rs600_irq_set(rdev);
@@ -413,7 +414,7 @@ int rs600_gart_init(struct radeon_device *rdev)
413{ 414{
414 int r; 415 int r;
415 416
416 if (rdev->gart.table.vram.robj) { 417 if (rdev->gart.robj) {
417 WARN(1, "RS600 GART already initialized\n"); 418 WARN(1, "RS600 GART already initialized\n");
418 return 0; 419 return 0;
419 } 420 }
@@ -431,7 +432,7 @@ static int rs600_gart_enable(struct radeon_device *rdev)
431 u32 tmp; 432 u32 tmp;
432 int r, i; 433 int r, i;
433 434
434 if (rdev->gart.table.vram.robj == NULL) { 435 if (rdev->gart.robj == NULL) {
435 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 436 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
436 return -EINVAL; 437 return -EINVAL;
437 } 438 }
@@ -494,20 +495,12 @@ static int rs600_gart_enable(struct radeon_device *rdev)
494void rs600_gart_disable(struct radeon_device *rdev) 495void rs600_gart_disable(struct radeon_device *rdev)
495{ 496{
496 u32 tmp; 497 u32 tmp;
497 int r;
498 498
499 /* FIXME: disable out of gart access */ 499 /* FIXME: disable out of gart access */
500 WREG32_MC(R_000100_MC_PT0_CNTL, 0); 500 WREG32_MC(R_000100_MC_PT0_CNTL, 0);
501 tmp = RREG32_MC(R_000009_MC_CNTL1); 501 tmp = RREG32_MC(R_000009_MC_CNTL1);
502 WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); 502 WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
503 if (rdev->gart.table.vram.robj) { 503 radeon_gart_table_vram_unpin(rdev);
504 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
505 if (r == 0) {
506 radeon_bo_kunmap(rdev->gart.table.vram.robj);
507 radeon_bo_unpin(rdev->gart.table.vram.robj);
508 radeon_bo_unreserve(rdev->gart.table.vram.robj);
509 }
510 }
511} 504}
512 505
513void rs600_gart_fini(struct radeon_device *rdev) 506void rs600_gart_fini(struct radeon_device *rdev)
@@ -525,7 +518,7 @@ void rs600_gart_fini(struct radeon_device *rdev)
525 518
526int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 519int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
527{ 520{
528 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; 521 void __iomem *ptr = (void *)rdev->gart.ptr;
529 522
530 if (i < 0 || i > rdev->gart.num_gpu_pages) { 523 if (i < 0 || i > rdev->gart.num_gpu_pages) {
531 return -EINVAL; 524 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 87cc1feee3ac..a983f410ab89 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -124,7 +124,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
124 u32 tmp; 124 u32 tmp;
125 int r, i; 125 int r, i;
126 126
127 if (rdev->gart.table.vram.robj == NULL) { 127 if (rdev->gart.robj == NULL) {
128 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 128 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
129 return -EINVAL; 129 return -EINVAL;
130 } 130 }
@@ -171,7 +171,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
171void rv770_pcie_gart_disable(struct radeon_device *rdev) 171void rv770_pcie_gart_disable(struct radeon_device *rdev)
172{ 172{
173 u32 tmp; 173 u32 tmp;
174 int i, r; 174 int i;
175 175
176 /* Disable all tables */ 176 /* Disable all tables */
177 for (i = 0; i < 7; i++) 177 for (i = 0; i < 7; i++)
@@ -191,14 +191,7 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
191 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); 191 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
192 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 192 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
193 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 193 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
194 if (rdev->gart.table.vram.robj) { 194 radeon_gart_table_vram_unpin(rdev);
195 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
196 if (likely(r == 0)) {
197 radeon_bo_kunmap(rdev->gart.table.vram.robj);
198 radeon_bo_unpin(rdev->gart.table.vram.robj);
199 radeon_bo_unreserve(rdev->gart.table.vram.robj);
200 }
201 }
202} 195}
203 196
204void rv770_pcie_gart_fini(struct radeon_device *rdev) 197void rv770_pcie_gart_fini(struct radeon_device *rdev)
@@ -282,7 +275,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
282 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 275 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
283 rdev->mc.vram_end >> 12); 276 rdev->mc.vram_end >> 12);
284 } 277 }
285 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); 278 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
286 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; 279 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
287 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); 280 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
288 WREG32(MC_VM_FB_LOCATION, tmp); 281 WREG32(MC_VM_FB_LOCATION, tmp);
@@ -959,54 +952,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
959 952
960} 953}
961 954
962static int rv770_vram_scratch_init(struct radeon_device *rdev)
963{
964 int r;
965 u64 gpu_addr;
966
967 if (rdev->vram_scratch.robj == NULL) {
968 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
969 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
970 &rdev->vram_scratch.robj);
971 if (r) {
972 return r;
973 }
974 }
975
976 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
977 if (unlikely(r != 0))
978 return r;
979 r = radeon_bo_pin(rdev->vram_scratch.robj,
980 RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
981 if (r) {
982 radeon_bo_unreserve(rdev->vram_scratch.robj);
983 return r;
984 }
985 r = radeon_bo_kmap(rdev->vram_scratch.robj,
986 (void **)&rdev->vram_scratch.ptr);
987 if (r)
988 radeon_bo_unpin(rdev->vram_scratch.robj);
989 radeon_bo_unreserve(rdev->vram_scratch.robj);
990
991 return r;
992}
993
994static void rv770_vram_scratch_fini(struct radeon_device *rdev)
995{
996 int r;
997
998 if (rdev->vram_scratch.robj == NULL) {
999 return;
1000 }
1001 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1002 if (likely(r == 0)) {
1003 radeon_bo_kunmap(rdev->vram_scratch.robj);
1004 radeon_bo_unpin(rdev->vram_scratch.robj);
1005 radeon_bo_unreserve(rdev->vram_scratch.robj);
1006 }
1007 radeon_bo_unref(&rdev->vram_scratch.robj);
1008}
1009
1010void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) 955void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1011{ 956{
1012 u64 size_bf, size_af; 957 u64 size_bf, size_af;
@@ -1106,6 +1051,10 @@ static int rv770_startup(struct radeon_device *rdev)
1106 } 1051 }
1107 } 1052 }
1108 1053
1054 r = r600_vram_scratch_init(rdev);
1055 if (r)
1056 return r;
1057
1109 rv770_mc_program(rdev); 1058 rv770_mc_program(rdev);
1110 if (rdev->flags & RADEON_IS_AGP) { 1059 if (rdev->flags & RADEON_IS_AGP) {
1111 rv770_agp_enable(rdev); 1060 rv770_agp_enable(rdev);
@@ -1114,9 +1063,7 @@ static int rv770_startup(struct radeon_device *rdev)
1114 if (r) 1063 if (r)
1115 return r; 1064 return r;
1116 } 1065 }
1117 r = rv770_vram_scratch_init(rdev); 1066
1118 if (r)
1119 return r;
1120 rv770_gpu_init(rdev); 1067 rv770_gpu_init(rdev);
1121 r = r600_blit_init(rdev); 1068 r = r600_blit_init(rdev);
1122 if (r) { 1069 if (r) {
@@ -1316,7 +1263,7 @@ void rv770_fini(struct radeon_device *rdev)
1316 radeon_ib_pool_fini(rdev); 1263 radeon_ib_pool_fini(rdev);
1317 radeon_irq_kms_fini(rdev); 1264 radeon_irq_kms_fini(rdev);
1318 rv770_pcie_gart_fini(rdev); 1265 rv770_pcie_gart_fini(rdev);
1319 rv770_vram_scratch_fini(rdev); 1266 r600_vram_scratch_fini(rdev);
1320 radeon_gem_fini(rdev); 1267 radeon_gem_fini(rdev);
1321 radeon_fence_driver_fini(rdev); 1268 radeon_fence_driver_fini(rdev);
1322 radeon_agp_fini(rdev); 1269 radeon_agp_fini(rdev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 1805b8c2a948..dff8fc767152 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -104,6 +104,9 @@
104#define DRM_IOCTL_VMW_PRESENT_READBACK \ 104#define DRM_IOCTL_VMW_PRESENT_READBACK \
105 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ 105 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
106 struct drm_vmw_present_readback_arg) 106 struct drm_vmw_present_readback_arg)
107#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
108 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
109 struct drm_vmw_update_layout_arg)
107 110
108/** 111/**
109 * The core DRM version of this macro doesn't account for 112 * The core DRM version of this macro doesn't account for
@@ -166,6 +169,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
166 VMW_IOCTL_DEF(VMW_PRESENT_READBACK, 169 VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
167 vmw_present_readback_ioctl, 170 vmw_present_readback_ioctl,
168 DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), 171 DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
172 VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
173 vmw_kms_update_layout_ioctl,
174 DRM_MASTER | DRM_UNLOCKED),
169}; 175};
170 176
171static struct pci_device_id vmw_pci_id_list[] = { 177static struct pci_device_id vmw_pci_id_list[] = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 30589d0aecd9..8cca91a93bde 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
40#include "ttm/ttm_module.h" 40#include "ttm/ttm_module.h"
41#include "vmwgfx_fence.h" 41#include "vmwgfx_fence.h"
42 42
43#define VMWGFX_DRIVER_DATE "20111008" 43#define VMWGFX_DRIVER_DATE "20111025"
44#define VMWGFX_DRIVER_MAJOR 2 44#define VMWGFX_DRIVER_MAJOR 2
45#define VMWGFX_DRIVER_MINOR 2 45#define VMWGFX_DRIVER_MINOR 3
46#define VMWGFX_DRIVER_PATCHLEVEL 0 46#define VMWGFX_DRIVER_PATCHLEVEL 0
47#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 47#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
48#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 48#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -633,6 +633,8 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
633 struct drm_vmw_fence_rep __user *user_fence_rep, 633 struct drm_vmw_fence_rep __user *user_fence_rep,
634 struct drm_vmw_rect *clips, 634 struct drm_vmw_rect *clips,
635 uint32_t num_clips); 635 uint32_t num_clips);
636int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
637 struct drm_file *file_priv);
636 638
637/** 639/**
638 * Overlay control - vmwgfx_overlay.c 640 * Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8b14dfd513a1..880e285d7578 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -105,12 +105,17 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
105 struct vmw_dma_buffer *dmabuf = NULL; 105 struct vmw_dma_buffer *dmabuf = NULL;
106 int ret; 106 int ret;
107 107
108 /* A lot of the code assumes this */
109 if (handle && (width != 64 || height != 64))
110 return -EINVAL;
111
108 if (handle) { 112 if (handle) {
109 ret = vmw_user_surface_lookup_handle(dev_priv, tfile, 113 ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
110 handle, &surface); 114 handle, &surface);
111 if (!ret) { 115 if (!ret) {
112 if (!surface->snooper.image) { 116 if (!surface->snooper.image) {
113 DRM_ERROR("surface not suitable for cursor\n"); 117 DRM_ERROR("surface not suitable for cursor\n");
118 vmw_surface_unreference(&surface);
114 return -EINVAL; 119 return -EINVAL;
115 } 120 }
116 } else { 121 } else {
@@ -176,7 +181,9 @@ err_unreserve:
176 return 0; 181 return 0;
177 } 182 }
178 183
179 vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y); 184 vmw_cursor_update_position(dev_priv, true,
185 du->cursor_x + du->hotspot_x,
186 du->cursor_y + du->hotspot_y);
180 187
181 return 0; 188 return 0;
182} 189}
@@ -191,7 +198,8 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
191 du->cursor_y = y + crtc->y; 198 du->cursor_y = y + crtc->y;
192 199
193 vmw_cursor_update_position(dev_priv, shown, 200 vmw_cursor_update_position(dev_priv, shown,
194 du->cursor_x, du->cursor_y); 201 du->cursor_x + du->hotspot_x,
202 du->cursor_y + du->hotspot_y);
195 203
196 return 0; 204 return 0;
197} 205}
@@ -212,7 +220,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
212 SVGA3dCmdHeader header; 220 SVGA3dCmdHeader header;
213 SVGA3dCmdSurfaceDMA dma; 221 SVGA3dCmdSurfaceDMA dma;
214 } *cmd; 222 } *cmd;
215 int ret; 223 int i, ret;
216 224
217 cmd = container_of(header, struct vmw_dma_cmd, header); 225 cmd = container_of(header, struct vmw_dma_cmd, header);
218 226
@@ -234,16 +242,19 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
234 box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) / 242 box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
235 sizeof(SVGA3dCopyBox); 243 sizeof(SVGA3dCopyBox);
236 244
237 if (cmd->dma.guest.pitch != (64 * 4) || 245 if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
238 cmd->dma.guest.ptr.offset % PAGE_SIZE ||
239 box->x != 0 || box->y != 0 || box->z != 0 || 246 box->x != 0 || box->y != 0 || box->z != 0 ||
240 box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || 247 box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
241 box->w != 64 || box->h != 64 || box->d != 1 || 248 box->d != 1 || box_count != 1) {
242 box_count != 1) {
243 /* TODO handle none page aligned offsets */ 249 /* TODO handle none page aligned offsets */
244 /* TODO handle partial uploads and pitch != 256 */ 250 /* TODO handle more dst & src != 0 */
245 /* TODO handle more then one copy (size != 64) */ 251 /* TODO handle more then one copy */
246 DRM_ERROR("lazy programmer, can't handle weird stuff\n"); 252 DRM_ERROR("Cant snoop dma request for cursor!\n");
253 DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
254 box->srcx, box->srcy, box->srcz,
255 box->x, box->y, box->z,
256 box->w, box->h, box->d, box_count,
257 cmd->dma.guest.ptr.offset);
247 return; 258 return;
248 } 259 }
249 260
@@ -262,7 +273,16 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
262 273
263 virtual = ttm_kmap_obj_virtual(&map, &dummy); 274 virtual = ttm_kmap_obj_virtual(&map, &dummy);
264 275
265 memcpy(srf->snooper.image, virtual, 64*64*4); 276 if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
277 memcpy(srf->snooper.image, virtual, 64*64*4);
278 } else {
279 /* Image is unsigned pointer. */
280 for (i = 0; i < box->h; i++)
281 memcpy(srf->snooper.image + i * 64,
282 virtual + i * cmd->dma.guest.pitch,
283 box->w * 4);
284 }
285
266 srf->snooper.age++; 286 srf->snooper.age++;
267 287
268 /* we can't call this function from this function since execbuf has 288 /* we can't call this function from this function since execbuf has
@@ -394,8 +414,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
394 top = clips->y1; 414 top = clips->y1;
395 bottom = clips->y2; 415 bottom = clips->y2;
396 416
397 clips_ptr = clips; 417 /* skip the first clip rect */
398 for (i = 1; i < num_clips; i++, clips_ptr += inc) { 418 for (i = 1, clips_ptr = clips + inc;
419 i < num_clips; i++, clips_ptr += inc) {
399 left = min_t(int, left, (int)clips_ptr->x1); 420 left = min_t(int, left, (int)clips_ptr->x1);
400 right = max_t(int, right, (int)clips_ptr->x2); 421 right = max_t(int, right, (int)clips_ptr->x2);
401 top = min_t(int, top, (int)clips_ptr->y1); 422 top = min_t(int, top, (int)clips_ptr->y1);
@@ -994,7 +1015,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
994 required_size = mode_cmd->pitch * mode_cmd->height; 1015 required_size = mode_cmd->pitch * mode_cmd->height;
995 if (unlikely(required_size > (u64) dev_priv->vram_size)) { 1016 if (unlikely(required_size > (u64) dev_priv->vram_size)) {
996 DRM_ERROR("VRAM size is too small for requested mode.\n"); 1017 DRM_ERROR("VRAM size is too small for requested mode.\n");
997 return NULL; 1018 return ERR_PTR(-ENOMEM);
998 } 1019 }
999 1020
1000 /* 1021 /*
@@ -1307,7 +1328,10 @@ int vmw_kms_close(struct vmw_private *dev_priv)
1307 * drm_encoder_cleanup which takes the lock we deadlock. 1328 * drm_encoder_cleanup which takes the lock we deadlock.
1308 */ 1329 */
1309 drm_mode_config_cleanup(dev_priv->dev); 1330 drm_mode_config_cleanup(dev_priv->dev);
1310 vmw_kms_close_legacy_display_system(dev_priv); 1331 if (dev_priv->sou_priv)
1332 vmw_kms_close_screen_object_display(dev_priv);
1333 else
1334 vmw_kms_close_legacy_display_system(dev_priv);
1311 return 0; 1335 return 0;
1312} 1336}
1313 1337
@@ -1517,6 +1541,8 @@ int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
1517 du->pref_width = rects[du->unit].w; 1541 du->pref_width = rects[du->unit].w;
1518 du->pref_height = rects[du->unit].h; 1542 du->pref_height = rects[du->unit].h;
1519 du->pref_active = true; 1543 du->pref_active = true;
1544 du->gui_x = rects[du->unit].x;
1545 du->gui_y = rects[du->unit].y;
1520 } else { 1546 } else {
1521 du->pref_width = 800; 1547 du->pref_width = 800;
1522 du->pref_height = 600; 1548 du->pref_height = 600;
@@ -1572,12 +1598,14 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
1572 uint32_t num_displays; 1598 uint32_t num_displays;
1573 struct drm_device *dev = connector->dev; 1599 struct drm_device *dev = connector->dev;
1574 struct vmw_private *dev_priv = vmw_priv(dev); 1600 struct vmw_private *dev_priv = vmw_priv(dev);
1601 struct vmw_display_unit *du = vmw_connector_to_du(connector);
1575 1602
1576 mutex_lock(&dev_priv->hw_mutex); 1603 mutex_lock(&dev_priv->hw_mutex);
1577 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); 1604 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
1578 mutex_unlock(&dev_priv->hw_mutex); 1605 mutex_unlock(&dev_priv->hw_mutex);
1579 1606
1580 return ((vmw_connector_to_du(connector)->unit < num_displays) ? 1607 return ((vmw_connector_to_du(connector)->unit < num_displays &&
1608 du->pref_active) ?
1581 connector_status_connected : connector_status_disconnected); 1609 connector_status_connected : connector_status_disconnected);
1582} 1610}
1583 1611
@@ -1658,6 +1686,28 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = {
1658 { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) }, 1686 { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
1659}; 1687};
1660 1688
1689/**
1690 * vmw_guess_mode_timing - Provide fake timings for a
1691 * 60Hz vrefresh mode.
1692 *
1693 * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
1694 * members filled in.
1695 */
1696static void vmw_guess_mode_timing(struct drm_display_mode *mode)
1697{
1698 mode->hsync_start = mode->hdisplay + 50;
1699 mode->hsync_end = mode->hsync_start + 50;
1700 mode->htotal = mode->hsync_end + 50;
1701
1702 mode->vsync_start = mode->vdisplay + 50;
1703 mode->vsync_end = mode->vsync_start + 50;
1704 mode->vtotal = mode->vsync_end + 50;
1705
1706 mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
1707 mode->vrefresh = drm_mode_vrefresh(mode);
1708}
1709
1710
1661int vmw_du_connector_fill_modes(struct drm_connector *connector, 1711int vmw_du_connector_fill_modes(struct drm_connector *connector,
1662 uint32_t max_width, uint32_t max_height) 1712 uint32_t max_width, uint32_t max_height)
1663{ 1713{
@@ -1680,18 +1730,23 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
1680 return 0; 1730 return 0;
1681 mode->hdisplay = du->pref_width; 1731 mode->hdisplay = du->pref_width;
1682 mode->vdisplay = du->pref_height; 1732 mode->vdisplay = du->pref_height;
1683 mode->vrefresh = drm_mode_vrefresh(mode); 1733 vmw_guess_mode_timing(mode);
1734
1684 if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2, 1735 if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
1685 mode->vdisplay)) { 1736 mode->vdisplay)) {
1686 drm_mode_probed_add(connector, mode); 1737 drm_mode_probed_add(connector, mode);
1738 } else {
1739 drm_mode_destroy(dev, mode);
1740 mode = NULL;
1741 }
1687 1742
1688 if (du->pref_mode) { 1743 if (du->pref_mode) {
1689 list_del_init(&du->pref_mode->head); 1744 list_del_init(&du->pref_mode->head);
1690 drm_mode_destroy(dev, du->pref_mode); 1745 drm_mode_destroy(dev, du->pref_mode);
1691 }
1692
1693 du->pref_mode = mode;
1694 } 1746 }
1747
1748 /* mode might be null here, this is intended */
1749 du->pref_mode = mode;
1695 } 1750 }
1696 1751
1697 for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) { 1752 for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
@@ -1712,6 +1767,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
1712 drm_mode_probed_add(connector, mode); 1767 drm_mode_probed_add(connector, mode);
1713 } 1768 }
1714 1769
1770 /* Move the prefered mode first, help apps pick the right mode. */
1771 if (du->pref_mode)
1772 list_move(&du->pref_mode->head, &connector->probed_modes);
1773
1715 drm_mode_connector_list_update(connector); 1774 drm_mode_connector_list_update(connector);
1716 1775
1717 return 1; 1776 return 1;
@@ -1723,3 +1782,63 @@ int vmw_du_connector_set_property(struct drm_connector *connector,
1723{ 1782{
1724 return 0; 1783 return 0;
1725} 1784}
1785
1786
1787int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1788 struct drm_file *file_priv)
1789{
1790 struct vmw_private *dev_priv = vmw_priv(dev);
1791 struct drm_vmw_update_layout_arg *arg =
1792 (struct drm_vmw_update_layout_arg *)data;
1793 struct vmw_master *vmaster = vmw_master(file_priv->master);
1794 void __user *user_rects;
1795 struct drm_vmw_rect *rects;
1796 unsigned rects_size;
1797 int ret;
1798 int i;
1799 struct drm_mode_config *mode_config = &dev->mode_config;
1800
1801 ret = ttm_read_lock(&vmaster->lock, true);
1802 if (unlikely(ret != 0))
1803 return ret;
1804
1805 if (!arg->num_outputs) {
1806 struct drm_vmw_rect def_rect = {0, 0, 800, 600};
1807 vmw_du_update_layout(dev_priv, 1, &def_rect);
1808 goto out_unlock;
1809 }
1810
1811 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
1812 rects = kzalloc(rects_size, GFP_KERNEL);
1813 if (unlikely(!rects)) {
1814 ret = -ENOMEM;
1815 goto out_unlock;
1816 }
1817
1818 user_rects = (void __user *)(unsigned long)arg->rects;
1819 ret = copy_from_user(rects, user_rects, rects_size);
1820 if (unlikely(ret != 0)) {
1821 DRM_ERROR("Failed to get rects.\n");
1822 ret = -EFAULT;
1823 goto out_free;
1824 }
1825
1826 for (i = 0; i < arg->num_outputs; ++i) {
1827 if (rects->x < 0 ||
1828 rects->y < 0 ||
1829 rects->x + rects->w > mode_config->max_width ||
1830 rects->y + rects->h > mode_config->max_height) {
1831 DRM_ERROR("Invalid GUI layout.\n");
1832 ret = -EINVAL;
1833 goto out_free;
1834 }
1835 }
1836
1837 vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
1838
1839out_free:
1840 kfree(rects);
1841out_unlock:
1842 ttm_read_unlock(&vmaster->lock);
1843 return ret;
1844}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index db0b901f8c3f..af8e6e5bd964 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -96,6 +96,13 @@ struct vmw_display_unit {
96 unsigned pref_height; 96 unsigned pref_height;
97 bool pref_active; 97 bool pref_active;
98 struct drm_display_mode *pref_mode; 98 struct drm_display_mode *pref_mode;
99
100 /*
101 * Gui positioning
102 */
103 int gui_x;
104 int gui_y;
105 bool is_implicit;
99}; 106};
100 107
101#define vmw_crtc_to_du(x) \ 108#define vmw_crtc_to_du(x) \
@@ -126,8 +133,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
126int vmw_du_connector_set_property(struct drm_connector *connector, 133int vmw_du_connector_set_property(struct drm_connector *connector,
127 struct drm_property *property, 134 struct drm_property *property,
128 uint64_t val); 135 uint64_t val);
129int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, 136
130 struct drm_vmw_rect *rects);
131 137
132/* 138/*
133 * Legacy display unit functions - vmwgfx_ldu.c 139 * Legacy display unit functions - vmwgfx_ldu.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 92f56bc594eb..90c5e3928491 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -337,13 +337,14 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
337 ldu->base.pref_width = 800; 337 ldu->base.pref_width = 800;
338 ldu->base.pref_height = 600; 338 ldu->base.pref_height = 600;
339 ldu->base.pref_mode = NULL; 339 ldu->base.pref_mode = NULL;
340 ldu->base.is_implicit = true;
340 341
341 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, 342 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
342 DRM_MODE_CONNECTOR_LVDS); 343 DRM_MODE_CONNECTOR_VIRTUAL);
343 connector->status = vmw_du_connector_detect(connector, true); 344 connector->status = vmw_du_connector_detect(connector, true);
344 345
345 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, 346 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
346 DRM_MODE_ENCODER_LVDS); 347 DRM_MODE_ENCODER_VIRTUAL);
347 drm_mode_connector_attach_encoder(connector, encoder); 348 drm_mode_connector_attach_encoder(connector, encoder);
348 encoder->possible_crtcs = (1 << unit); 349 encoder->possible_crtcs = (1 << unit);
349 encoder->possible_clones = 0; 350 encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 477b2a9eb3c2..4defdcf1c72e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -36,12 +36,9 @@
36 container_of(x, struct vmw_screen_object_unit, base.connector) 36 container_of(x, struct vmw_screen_object_unit, base.connector)
37 37
38struct vmw_screen_object_display { 38struct vmw_screen_object_display {
39 struct list_head active; 39 unsigned num_implicit;
40 40
41 unsigned num_active; 41 struct vmw_framebuffer *implicit_fb;
42 unsigned last_num_active;
43
44 struct vmw_framebuffer *fb;
45}; 42};
46 43
47/** 44/**
@@ -54,13 +51,11 @@ struct vmw_screen_object_unit {
54 struct vmw_dma_buffer *buffer; /**< Backing store buffer */ 51 struct vmw_dma_buffer *buffer; /**< Backing store buffer */
55 52
56 bool defined; 53 bool defined;
57 54 bool active_implicit;
58 struct list_head active;
59}; 55};
60 56
61static void vmw_sou_destroy(struct vmw_screen_object_unit *sou) 57static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
62{ 58{
63 list_del_init(&sou->active);
64 vmw_display_unit_cleanup(&sou->base); 59 vmw_display_unit_cleanup(&sou->base);
65 kfree(sou); 60 kfree(sou);
66} 61}
@@ -75,58 +70,31 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
75 vmw_sou_destroy(vmw_crtc_to_sou(crtc)); 70 vmw_sou_destroy(vmw_crtc_to_sou(crtc));
76} 71}
77 72
78static int vmw_sou_del_active(struct vmw_private *vmw_priv, 73static void vmw_sou_del_active(struct vmw_private *vmw_priv,
79 struct vmw_screen_object_unit *sou) 74 struct vmw_screen_object_unit *sou)
80{ 75{
81 struct vmw_screen_object_display *ld = vmw_priv->sou_priv; 76 struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
82 if (list_empty(&sou->active))
83 return 0;
84 77
85 /* Must init otherwise list_empty(&sou->active) will not work. */ 78 if (sou->active_implicit) {
86 list_del_init(&sou->active); 79 if (--(ld->num_implicit) == 0)
87 if (--(ld->num_active) == 0) { 80 ld->implicit_fb = NULL;
88 BUG_ON(!ld->fb); 81 sou->active_implicit = false;
89 if (ld->fb->unpin)
90 ld->fb->unpin(ld->fb);
91 ld->fb = NULL;
92 } 82 }
93
94 return 0;
95} 83}
96 84
97static int vmw_sou_add_active(struct vmw_private *vmw_priv, 85static void vmw_sou_add_active(struct vmw_private *vmw_priv,
98 struct vmw_screen_object_unit *sou, 86 struct vmw_screen_object_unit *sou,
99 struct vmw_framebuffer *vfb) 87 struct vmw_framebuffer *vfb)
100{ 88{
101 struct vmw_screen_object_display *ld = vmw_priv->sou_priv; 89 struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
102 struct vmw_screen_object_unit *entry;
103 struct list_head *at;
104
105 BUG_ON(!ld->num_active && ld->fb);
106 if (vfb != ld->fb) {
107 if (ld->fb && ld->fb->unpin)
108 ld->fb->unpin(ld->fb);
109 if (vfb->pin)
110 vfb->pin(vfb);
111 ld->fb = vfb;
112 }
113
114 if (!list_empty(&sou->active))
115 return 0;
116 90
117 at = &ld->active; 91 BUG_ON(!ld->num_implicit && ld->implicit_fb);
118 list_for_each_entry(entry, &ld->active, active) {
119 if (entry->base.unit > sou->base.unit)
120 break;
121 92
122 at = &entry->active; 93 if (!sou->active_implicit && sou->base.is_implicit) {
94 ld->implicit_fb = vfb;
95 sou->active_implicit = true;
96 ld->num_implicit++;
123 } 97 }
124
125 list_add(&sou->active, at);
126
127 ld->num_active++;
128
129 return 0;
130} 98}
131 99
132/** 100/**
@@ -164,8 +132,13 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
164 (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0); 132 (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
165 cmd->obj.size.width = mode->hdisplay; 133 cmd->obj.size.width = mode->hdisplay;
166 cmd->obj.size.height = mode->vdisplay; 134 cmd->obj.size.height = mode->vdisplay;
167 cmd->obj.root.x = x; 135 if (sou->base.is_implicit) {
168 cmd->obj.root.y = y; 136 cmd->obj.root.x = x;
137 cmd->obj.root.y = y;
138 } else {
139 cmd->obj.root.x = sou->base.gui_x;
140 cmd->obj.root.y = sou->base.gui_y;
141 }
169 142
170 /* Ok to assume that buffer is pinned in vram */ 143 /* Ok to assume that buffer is pinned in vram */
171 vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr); 144 vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
@@ -312,10 +285,11 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
312 } 285 }
313 286
314 /* sou only supports one fb active at the time */ 287 /* sou only supports one fb active at the time */
315 if (dev_priv->sou_priv->fb && vfb && 288 if (sou->base.is_implicit &&
316 !(dev_priv->sou_priv->num_active == 1 && 289 dev_priv->sou_priv->implicit_fb && vfb &&
317 !list_empty(&sou->active)) && 290 !(dev_priv->sou_priv->num_implicit == 1 &&
318 dev_priv->sou_priv->fb != vfb) { 291 sou->active_implicit) &&
292 dev_priv->sou_priv->implicit_fb != vfb) {
319 DRM_ERROR("Multiple framebuffers not supported\n"); 293 DRM_ERROR("Multiple framebuffers not supported\n");
320 return -EINVAL; 294 return -EINVAL;
321 } 295 }
@@ -471,19 +445,20 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
471 encoder = &sou->base.encoder; 445 encoder = &sou->base.encoder;
472 connector = &sou->base.connector; 446 connector = &sou->base.connector;
473 447
474 INIT_LIST_HEAD(&sou->active); 448 sou->active_implicit = false;
475 449
476 sou->base.pref_active = (unit == 0); 450 sou->base.pref_active = (unit == 0);
477 sou->base.pref_width = 800; 451 sou->base.pref_width = 800;
478 sou->base.pref_height = 600; 452 sou->base.pref_height = 600;
479 sou->base.pref_mode = NULL; 453 sou->base.pref_mode = NULL;
454 sou->base.is_implicit = true;
480 455
481 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, 456 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
482 DRM_MODE_CONNECTOR_LVDS); 457 DRM_MODE_CONNECTOR_VIRTUAL);
483 connector->status = vmw_du_connector_detect(connector, true); 458 connector->status = vmw_du_connector_detect(connector, true);
484 459
485 drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs, 460 drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
486 DRM_MODE_ENCODER_LVDS); 461 DRM_MODE_ENCODER_VIRTUAL);
487 drm_mode_connector_attach_encoder(connector, encoder); 462 drm_mode_connector_attach_encoder(connector, encoder);
488 encoder->possible_crtcs = (1 << unit); 463 encoder->possible_crtcs = (1 << unit);
489 encoder->possible_clones = 0; 464 encoder->possible_clones = 0;
@@ -520,10 +495,8 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
520 if (unlikely(!dev_priv->sou_priv)) 495 if (unlikely(!dev_priv->sou_priv))
521 goto err_no_mem; 496 goto err_no_mem;
522 497
523 INIT_LIST_HEAD(&dev_priv->sou_priv->active); 498 dev_priv->sou_priv->num_implicit = 0;
524 dev_priv->sou_priv->num_active = 0; 499 dev_priv->sou_priv->implicit_fb = NULL;
525 dev_priv->sou_priv->last_num_active = 0;
526 dev_priv->sou_priv->fb = NULL;
527 500
528 ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS); 501 ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
529 if (unlikely(ret != 0)) 502 if (unlikely(ret != 0))
@@ -558,9 +531,6 @@ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
558 531
559 drm_vblank_cleanup(dev); 532 drm_vblank_cleanup(dev);
560 533
561 if (!list_empty(&dev_priv->sou_priv->active))
562 DRM_ERROR("Still have active outputs when unloading driver");
563
564 kfree(dev_priv->sou_priv); 534 kfree(dev_priv->sou_priv);
565 535
566 return 0; 536 return 0;
diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c
index 143461a95ae4..86980fe04117 100644
--- a/drivers/hwspinlock/u8500_hsem.c
+++ b/drivers/hwspinlock/u8500_hsem.c
@@ -21,6 +21,7 @@
21 * General Public License for more details. 21 * General Public License for more details.
22 */ 22 */
23 23
24#include <linux/module.h>
24#include <linux/delay.h> 25#include <linux/delay.h>
25#include <linux/io.h> 26#include <linux/io.h>
26#include <linux/pm_runtime.h> 27#include <linux/pm_runtime.h>
@@ -108,10 +109,8 @@ static int __devinit u8500_hsem_probe(struct platform_device *pdev)
108 return -ENODEV; 109 return -ENODEV;
109 110
110 io_base = ioremap(res->start, resource_size(res)); 111 io_base = ioremap(res->start, resource_size(res));
111 if (!io_base) { 112 if (!io_base)
112 ret = -ENOMEM; 113 return -ENOMEM;
113 goto free_state;
114 }
115 114
116 /* make sure protocol 1 is selected */ 115 /* make sure protocol 1 is selected */
117 val = readl(io_base + HSEM_CTRL_REG); 116 val = readl(io_base + HSEM_CTRL_REG);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 04b09564bfa9..8126824daccb 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -43,7 +43,6 @@
43/* For SCSI -> ATAPI command conversion */ 43/* For SCSI -> ATAPI command conversion */
44#include <scsi/scsi.h> 44#include <scsi/scsi.h>
45 45
46#include <linux/irq.h>
47#include <linux/io.h> 46#include <linux/io.h>
48#include <asm/byteorder.h> 47#include <asm/byteorder.h>
49#include <linux/uaccess.h> 48#include <linux/uaccess.h>
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 61fdf544fbd6..3d42043fec51 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -35,7 +35,6 @@
35#include <scsi/scsi_ioctl.h> 35#include <scsi/scsi_ioctl.h>
36 36
37#include <asm/byteorder.h> 37#include <asm/byteorder.h>
38#include <linux/irq.h>
39#include <linux/uaccess.h> 38#include <linux/uaccess.h>
40#include <linux/io.h> 39#include <linux/io.h>
41#include <asm/unaligned.h> 40#include <asm/unaligned.h>
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 7ecb1ade8874..ce8237d36159 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -41,7 +41,6 @@
41#include <scsi/scsi.h> 41#include <scsi/scsi.h>
42 42
43#include <asm/byteorder.h> 43#include <asm/byteorder.h>
44#include <linux/irq.h>
45#include <linux/uaccess.h> 44#include <linux/uaccess.h>
46#include <linux/io.h> 45#include <linux/io.h>
47#include <asm/unaligned.h> 46#include <asm/unaligned.h>
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 18767f8ab090..5d2f8e13cf0e 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -82,7 +82,8 @@ static unsigned int mwait_substates;
82static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ 82static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
83 83
84static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; 84static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
85static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); 85static int intel_idle(struct cpuidle_device *dev,
86 struct cpuidle_driver *drv, int index);
86 87
87static struct cpuidle_state *cpuidle_state_table; 88static struct cpuidle_state *cpuidle_state_table;
88 89
@@ -110,7 +111,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
110 { /* MWAIT C1 */ 111 { /* MWAIT C1 */
111 .name = "C1-NHM", 112 .name = "C1-NHM",
112 .desc = "MWAIT 0x00", 113 .desc = "MWAIT 0x00",
113 .driver_data = (void *) 0x00,
114 .flags = CPUIDLE_FLAG_TIME_VALID, 114 .flags = CPUIDLE_FLAG_TIME_VALID,
115 .exit_latency = 3, 115 .exit_latency = 3,
116 .target_residency = 6, 116 .target_residency = 6,
@@ -118,7 +118,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
118 { /* MWAIT C2 */ 118 { /* MWAIT C2 */
119 .name = "C3-NHM", 119 .name = "C3-NHM",
120 .desc = "MWAIT 0x10", 120 .desc = "MWAIT 0x10",
121 .driver_data = (void *) 0x10,
122 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 121 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
123 .exit_latency = 20, 122 .exit_latency = 20,
124 .target_residency = 80, 123 .target_residency = 80,
@@ -126,7 +125,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
126 { /* MWAIT C3 */ 125 { /* MWAIT C3 */
127 .name = "C6-NHM", 126 .name = "C6-NHM",
128 .desc = "MWAIT 0x20", 127 .desc = "MWAIT 0x20",
129 .driver_data = (void *) 0x20,
130 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 128 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
131 .exit_latency = 200, 129 .exit_latency = 200,
132 .target_residency = 800, 130 .target_residency = 800,
@@ -138,7 +136,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
138 { /* MWAIT C1 */ 136 { /* MWAIT C1 */
139 .name = "C1-SNB", 137 .name = "C1-SNB",
140 .desc = "MWAIT 0x00", 138 .desc = "MWAIT 0x00",
141 .driver_data = (void *) 0x00,
142 .flags = CPUIDLE_FLAG_TIME_VALID, 139 .flags = CPUIDLE_FLAG_TIME_VALID,
143 .exit_latency = 1, 140 .exit_latency = 1,
144 .target_residency = 1, 141 .target_residency = 1,
@@ -146,7 +143,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
146 { /* MWAIT C2 */ 143 { /* MWAIT C2 */
147 .name = "C3-SNB", 144 .name = "C3-SNB",
148 .desc = "MWAIT 0x10", 145 .desc = "MWAIT 0x10",
149 .driver_data = (void *) 0x10,
150 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 146 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
151 .exit_latency = 80, 147 .exit_latency = 80,
152 .target_residency = 211, 148 .target_residency = 211,
@@ -154,7 +150,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
154 { /* MWAIT C3 */ 150 { /* MWAIT C3 */
155 .name = "C6-SNB", 151 .name = "C6-SNB",
156 .desc = "MWAIT 0x20", 152 .desc = "MWAIT 0x20",
157 .driver_data = (void *) 0x20,
158 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 153 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
159 .exit_latency = 104, 154 .exit_latency = 104,
160 .target_residency = 345, 155 .target_residency = 345,
@@ -162,7 +157,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
162 { /* MWAIT C4 */ 157 { /* MWAIT C4 */
163 .name = "C7-SNB", 158 .name = "C7-SNB",
164 .desc = "MWAIT 0x30", 159 .desc = "MWAIT 0x30",
165 .driver_data = (void *) 0x30,
166 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 160 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
167 .exit_latency = 109, 161 .exit_latency = 109,
168 .target_residency = 345, 162 .target_residency = 345,
@@ -174,7 +168,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
174 { /* MWAIT C1 */ 168 { /* MWAIT C1 */
175 .name = "C1-ATM", 169 .name = "C1-ATM",
176 .desc = "MWAIT 0x00", 170 .desc = "MWAIT 0x00",
177 .driver_data = (void *) 0x00,
178 .flags = CPUIDLE_FLAG_TIME_VALID, 171 .flags = CPUIDLE_FLAG_TIME_VALID,
179 .exit_latency = 1, 172 .exit_latency = 1,
180 .target_residency = 4, 173 .target_residency = 4,
@@ -182,7 +175,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
182 { /* MWAIT C2 */ 175 { /* MWAIT C2 */
183 .name = "C2-ATM", 176 .name = "C2-ATM",
184 .desc = "MWAIT 0x10", 177 .desc = "MWAIT 0x10",
185 .driver_data = (void *) 0x10,
186 .flags = CPUIDLE_FLAG_TIME_VALID, 178 .flags = CPUIDLE_FLAG_TIME_VALID,
187 .exit_latency = 20, 179 .exit_latency = 20,
188 .target_residency = 80, 180 .target_residency = 80,
@@ -191,7 +183,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
191 { /* MWAIT C4 */ 183 { /* MWAIT C4 */
192 .name = "C4-ATM", 184 .name = "C4-ATM",
193 .desc = "MWAIT 0x30", 185 .desc = "MWAIT 0x30",
194 .driver_data = (void *) 0x30,
195 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 186 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
196 .exit_latency = 100, 187 .exit_latency = 100,
197 .target_residency = 400, 188 .target_residency = 400,
@@ -200,23 +191,55 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
200 { /* MWAIT C6 */ 191 { /* MWAIT C6 */
201 .name = "C6-ATM", 192 .name = "C6-ATM",
202 .desc = "MWAIT 0x52", 193 .desc = "MWAIT 0x52",
203 .driver_data = (void *) 0x52,
204 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 194 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
205 .exit_latency = 140, 195 .exit_latency = 140,
206 .target_residency = 560, 196 .target_residency = 560,
207 .enter = &intel_idle }, 197 .enter = &intel_idle },
208}; 198};
209 199
200static int get_driver_data(int cstate)
201{
202 int driver_data;
203 switch (cstate) {
204
205 case 1: /* MWAIT C1 */
206 driver_data = 0x00;
207 break;
208 case 2: /* MWAIT C2 */
209 driver_data = 0x10;
210 break;
211 case 3: /* MWAIT C3 */
212 driver_data = 0x20;
213 break;
214 case 4: /* MWAIT C4 */
215 driver_data = 0x30;
216 break;
217 case 5: /* MWAIT C5 */
218 driver_data = 0x40;
219 break;
220 case 6: /* MWAIT C6 */
221 driver_data = 0x52;
222 break;
223 default:
224 driver_data = 0x00;
225 }
226 return driver_data;
227}
228
210/** 229/**
211 * intel_idle 230 * intel_idle
212 * @dev: cpuidle_device 231 * @dev: cpuidle_device
213 * @state: cpuidle state 232 * @drv: cpuidle driver
233 * @index: index of cpuidle state
214 * 234 *
215 */ 235 */
216static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) 236static int intel_idle(struct cpuidle_device *dev,
237 struct cpuidle_driver *drv, int index)
217{ 238{
218 unsigned long ecx = 1; /* break on interrupt flag */ 239 unsigned long ecx = 1; /* break on interrupt flag */
219 unsigned long eax = (unsigned long)cpuidle_get_statedata(state); 240 struct cpuidle_state *state = &drv->states[index];
241 struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
242 unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
220 unsigned int cstate; 243 unsigned int cstate;
221 ktime_t kt_before, kt_after; 244 ktime_t kt_before, kt_after;
222 s64 usec_delta; 245 s64 usec_delta;
@@ -257,7 +280,10 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
257 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 280 if (!(lapic_timer_reliable_states & (1 << (cstate))))
258 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); 281 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
259 282
260 return usec_delta; 283 /* Update cpuidle counters */
284 dev->last_residency = (int)usec_delta;
285
286 return index;
261} 287}
262 288
263static void __setup_broadcast_timer(void *arg) 289static void __setup_broadcast_timer(void *arg)
@@ -398,6 +424,60 @@ static void intel_idle_cpuidle_devices_uninit(void)
398 return; 424 return;
399} 425}
400/* 426/*
427 * intel_idle_cpuidle_driver_init()
428 * allocate, initialize cpuidle_states
429 */
430static int intel_idle_cpuidle_driver_init(void)
431{
432 int cstate;
433 struct cpuidle_driver *drv = &intel_idle_driver;
434
435 drv->state_count = 1;
436
437 for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
438 int num_substates;
439
440 if (cstate > max_cstate) {
441 printk(PREFIX "max_cstate %d reached\n",
442 max_cstate);
443 break;
444 }
445
446 /* does the state exist in CPUID.MWAIT? */
447 num_substates = (mwait_substates >> ((cstate) * 4))
448 & MWAIT_SUBSTATE_MASK;
449 if (num_substates == 0)
450 continue;
451 /* is the state not enabled? */
452 if (cpuidle_state_table[cstate].enter == NULL) {
453 /* does the driver not know about the state? */
454 if (*cpuidle_state_table[cstate].name == '\0')
455 pr_debug(PREFIX "unaware of model 0x%x"
456 " MWAIT %d please"
457 " contact lenb@kernel.org",
458 boot_cpu_data.x86_model, cstate);
459 continue;
460 }
461
462 if ((cstate > 2) &&
463 !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
464 mark_tsc_unstable("TSC halts in idle"
465 " states deeper than C2");
466
467 drv->states[drv->state_count] = /* structure copy */
468 cpuidle_state_table[cstate];
469
470 drv->state_count += 1;
471 }
472
473 if (auto_demotion_disable_flags)
474 smp_call_function(auto_demotion_disable, NULL, 1);
475
476 return 0;
477}
478
479
480/*
401 * intel_idle_cpuidle_devices_init() 481 * intel_idle_cpuidle_devices_init()
402 * allocate, initialize, register cpuidle_devices 482 * allocate, initialize, register cpuidle_devices
403 */ 483 */
@@ -431,22 +511,11 @@ static int intel_idle_cpuidle_devices_init(void)
431 continue; 511 continue;
432 /* is the state not enabled? */ 512 /* is the state not enabled? */
433 if (cpuidle_state_table[cstate].enter == NULL) { 513 if (cpuidle_state_table[cstate].enter == NULL) {
434 /* does the driver not know about the state? */
435 if (*cpuidle_state_table[cstate].name == '\0')
436 pr_debug(PREFIX "unaware of model 0x%x"
437 " MWAIT %d please"
438 " contact lenb@kernel.org",
439 boot_cpu_data.x86_model, cstate);
440 continue; 514 continue;
441 } 515 }
442 516
443 if ((cstate > 2) && 517 dev->states_usage[dev->state_count].driver_data =
444 !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) 518 (void *)get_driver_data(cstate);
445 mark_tsc_unstable("TSC halts in idle"
446 " states deeper than C2");
447
448 dev->states[dev->state_count] = /* structure copy */
449 cpuidle_state_table[cstate];
450 519
451 dev->state_count += 1; 520 dev->state_count += 1;
452 } 521 }
@@ -459,8 +528,6 @@ static int intel_idle_cpuidle_devices_init(void)
459 return -EIO; 528 return -EIO;
460 } 529 }
461 } 530 }
462 if (auto_demotion_disable_flags)
463 smp_call_function(auto_demotion_disable, NULL, 1);
464 531
465 return 0; 532 return 0;
466} 533}
@@ -478,6 +545,7 @@ static int __init intel_idle_init(void)
478 if (retval) 545 if (retval)
479 return retval; 546 return retval;
480 547
548 intel_idle_cpuidle_driver_init();
481 retval = cpuidle_register_driver(&intel_idle_driver); 549 retval = cpuidle_register_driver(&intel_idle_driver);
482 if (retval) { 550 if (retval) {
483 printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", 551 printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 9c192e79f806..288da5c1499d 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/err.h> 14#include <linux/err.h>
14#include <linux/clk.h> 15#include <linux/clk.h>
15#include <linux/io.h> 16#include <linux/io.h>
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index e8fdb8830f69..46be456fcc00 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/err.h> 14#include <linux/err.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index 817f37a875c9..c9570fcf1cce 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -159,7 +159,7 @@ int macii_init(void)
159 err = macii_init_via(); 159 err = macii_init_via();
160 if (err) goto out; 160 if (err) goto out;
161 161
162 err = request_irq(IRQ_MAC_ADB, macii_interrupt, IRQ_FLG_LOCK, "ADB", 162 err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB",
163 macii_interrupt); 163 macii_interrupt);
164 if (err) goto out; 164 if (err) goto out;
165 165
diff --git a/drivers/macintosh/via-maciisi.c b/drivers/macintosh/via-maciisi.c
index 9ab5b0c34f0d..34d02a91b29f 100644
--- a/drivers/macintosh/via-maciisi.c
+++ b/drivers/macintosh/via-maciisi.c
@@ -122,8 +122,8 @@ maciisi_init(void)
122 return err; 122 return err;
123 } 123 }
124 124
125 if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, IRQ_FLG_LOCK | IRQ_FLG_FAST, 125 if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, 0, "ADB",
126 "ADB", maciisi_interrupt)) { 126 maciisi_interrupt)) {
127 printk(KERN_ERR "maciisi_init: can't get irq %d\n", IRQ_MAC_ADB); 127 printk(KERN_ERR "maciisi_init: can't get irq %d\n", IRQ_MAC_ADB);
128 return -EAGAIN; 128 return -EAGAIN;
129 } 129 }
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index cb246667dd52..0a6806f80ab5 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -14,6 +14,7 @@
14#include <linux/vmalloc.h> 14#include <linux/vmalloc.h>
15#include <linux/version.h> 15#include <linux/version.h>
16#include <linux/shrinker.h> 16#include <linux/shrinker.h>
17#include <linux/module.h>
17 18
18#define DM_MSG_PREFIX "bufio" 19#define DM_MSG_PREFIX "bufio"
19 20
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 65fd85ec6514..023fbc2d389e 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -8,7 +8,7 @@
8#include "dm-btree-internal.h" 8#include "dm-btree-internal.h"
9#include "dm-transaction-manager.h" 9#include "dm-transaction-manager.h"
10 10
11#include <linux/module.h> 11#include <linux/export.h>
12 12
13/* 13/*
14 * Removing an entry from a btree 14 * Removing an entry from a btree
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index e0638be53ea4..bd1e7ffbe26c 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -8,7 +8,7 @@
8#include "dm-space-map.h" 8#include "dm-space-map.h"
9#include "dm-transaction-manager.h" 9#include "dm-transaction-manager.h"
10 10
11#include <linux/module.h> 11#include <linux/export.h>
12#include <linux/device-mapper.h> 12#include <linux/device-mapper.h>
13 13
14#define DM_MSG_PREFIX "btree" 14#define DM_MSG_PREFIX "btree"
diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
index bb44a937fe63..50ed53bf4aa2 100644
--- a/drivers/md/persistent-data/dm-space-map-checker.c
+++ b/drivers/md/persistent-data/dm-space-map-checker.c
@@ -7,6 +7,7 @@
7#include "dm-space-map-checker.h" 7#include "dm-space-map-checker.h"
8 8
9#include <linux/device-mapper.h> 9#include <linux/device-mapper.h>
10#include <linux/export.h>
10 11
11#ifdef CONFIG_DM_DEBUG_SPACE_MAPS 12#ifdef CONFIG_DM_DEBUG_SPACE_MAPS
12 13
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index aeff7852cf79..fc469ba9f627 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -12,7 +12,7 @@
12 12
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/module.h> 15#include <linux/export.h>
16#include <linux/device-mapper.h> 16#include <linux/device-mapper.h>
17 17
18#define DM_MSG_PREFIX "space map disk" 18#define DM_MSG_PREFIX "space map disk"
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 728e89a3f978..6f8d38747d7f 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -10,7 +10,7 @@
10#include "dm-space-map-metadata.h" 10#include "dm-space-map-metadata.h"
11#include "dm-persistent-data-internal.h" 11#include "dm-persistent-data-internal.h"
12 12
13#include <linux/module.h> 13#include <linux/export.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/device-mapper.h> 15#include <linux/device-mapper.h>
16 16
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 472aedfb07cf..297e26092178 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3110,7 +3110,7 @@ static void handle_stripe(struct stripe_head *sh)
3110 struct r5dev *pdev, *qdev; 3110 struct r5dev *pdev, *qdev;
3111 3111
3112 clear_bit(STRIPE_HANDLE, &sh->state); 3112 clear_bit(STRIPE_HANDLE, &sh->state);
3113 if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) { 3113 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
3114 /* already being handled, ensure it gets handled 3114 /* already being handled, ensure it gets handled
3115 * again when current action finishes */ 3115 * again when current action finishes */
3116 set_bit(STRIPE_HANDLE, &sh->state); 3116 set_bit(STRIPE_HANDLE, &sh->state);
@@ -3159,10 +3159,14 @@ static void handle_stripe(struct stripe_head *sh)
3159 /* check if the array has lost more than max_degraded devices and, 3159 /* check if the array has lost more than max_degraded devices and,
3160 * if so, some requests might need to be failed. 3160 * if so, some requests might need to be failed.
3161 */ 3161 */
3162 if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written) 3162 if (s.failed > conf->max_degraded) {
3163 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); 3163 sh->check_state = 0;
3164 if (s.failed > conf->max_degraded && s.syncing) 3164 sh->reconstruct_state = 0;
3165 handle_failed_sync(conf, sh, &s); 3165 if (s.to_read+s.to_write+s.written)
3166 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3167 if (s.syncing)
3168 handle_failed_sync(conf, sh, &s);
3169 }
3166 3170
3167 /* 3171 /*
3168 * might be able to return some write requests if the parity blocks 3172 * might be able to return some write requests if the parity blocks
@@ -3371,7 +3375,7 @@ finish:
3371 3375
3372 return_io(s.return_bi); 3376 return_io(s.return_bi);
3373 3377
3374 clear_bit(STRIPE_ACTIVE, &sh->state); 3378 clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
3375} 3379}
3376 3380
3377static void raid5_activate_delayed(struct r5conf *conf) 3381static void raid5_activate_delayed(struct r5conf *conf)
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c
index 2e8c288258a9..34434557ef65 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c
@@ -398,7 +398,6 @@ static int mxl111sf_i2c_readagain(struct mxl111sf_state *state,
398 u8 i2c_r_data[24]; 398 u8 i2c_r_data[24];
399 u8 i = 0; 399 u8 i = 0;
400 u8 fifo_status = 0; 400 u8 fifo_status = 0;
401 int ret;
402 int status = 0; 401 int status = 0;
403 402
404 mxl_i2c("read %d bytes", count); 403 mxl_i2c("read %d bytes", count);
@@ -418,7 +417,7 @@ static int mxl111sf_i2c_readagain(struct mxl111sf_state *state,
418 i2c_w_data[4+(i*3)] = 0x00; 417 i2c_w_data[4+(i*3)] = 0x00;
419 } 418 }
420 419
421 ret = mxl111sf_i2c_get_data(state, 0, i2c_w_data, i2c_r_data); 420 mxl111sf_i2c_get_data(state, 0, i2c_w_data, i2c_r_data);
422 421
423 /* Check for I2C NACK status */ 422 /* Check for I2C NACK status */
424 if (mxl111sf_i2c_check_status(state) == 1) { 423 if (mxl111sf_i2c_check_status(state) == 1) {
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-phy.c b/drivers/media/dvb/dvb-usb/mxl111sf-phy.c
index 91dc1fc2825b..b741b3a7a325 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf-phy.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-phy.c
@@ -296,8 +296,7 @@ int mxl111sf_config_spi(struct mxl111sf_state *state, int onoff)
296 goto fail; 296 goto fail;
297 297
298 ret = mxl111sf_write_reg(state, 0x00, 0x00); 298 ret = mxl111sf_write_reg(state, 0x00, 0x00);
299 if (mxl_fail(ret)) 299 mxl_fail(ret);
300 goto fail;
301fail: 300fail:
302 return ret; 301 return ret;
303} 302}
@@ -328,11 +327,13 @@ int mxl111sf_idac_config(struct mxl111sf_state *state,
328 /* set hysteresis value reg: 0x0B<5:0> */ 327 /* set hysteresis value reg: 0x0B<5:0> */
329 ret = mxl111sf_write_reg(state, V6_IDAC_HYSTERESIS_REG, 328 ret = mxl111sf_write_reg(state, V6_IDAC_HYSTERESIS_REG,
330 (hysteresis_value & 0x3F)); 329 (hysteresis_value & 0x3F));
330 mxl_fail(ret);
331 } 331 }
332 332
333 ret = mxl111sf_write_reg(state, V6_IDAC_SETTINGS_REG, val); 333 ret = mxl111sf_write_reg(state, V6_IDAC_SETTINGS_REG, val);
334 mxl_fail(ret);
334 335
335 return val; 336 return ret;
336} 337}
337 338
338/* 339/*
diff --git a/drivers/media/video/s5k6aa.c b/drivers/media/video/s5k6aa.c
index 2446736b7871..0df7f2a41814 100644
--- a/drivers/media/video/s5k6aa.c
+++ b/drivers/media/video/s5k6aa.c
@@ -19,6 +19,7 @@
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include <linux/media.h> 21#include <linux/media.h>
22#include <linux/module.h>
22#include <linux/regulator/consumer.h> 23#include <linux/regulator/consumer.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
24 25
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index 725634d9736d..844a4d7797bc 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -220,8 +220,8 @@ static int vidioc_querycap(struct file *file, void *priv,
220 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); 220 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
221 cap->bus_info[0] = 0; 221 cap->bus_info[0] = 0;
222 cap->version = KERNEL_VERSION(1, 0, 0); 222 cap->version = KERNEL_VERSION(1, 0, 0);
223 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT 223 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
224 | V4L2_CAP_STREAMING; 224 V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING;
225 return 0; 225 return 0;
226} 226}
227 227
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index ecef127dbc66..1e8cdb77d4b8 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -785,8 +785,8 @@ static int vidioc_querycap(struct file *file, void *priv,
785 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); 785 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
786 cap->bus_info[0] = 0; 786 cap->bus_info[0] = 0;
787 cap->version = KERNEL_VERSION(1, 0, 0); 787 cap->version = KERNEL_VERSION(1, 0, 0);
788 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE 788 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE
789 | V4L2_CAP_VIDEO_OUTPUT 789 | V4L2_CAP_VIDEO_OUTPUT_MPLANE
790 | V4L2_CAP_STREAMING; 790 | V4L2_CAP_STREAMING;
791 return 0; 791 return 0;
792} 792}
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 10c2364f3e8a..254d32688843 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -1016,7 +1016,8 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
1016 1016
1017 menu_info = &mapping->menu_info[query_menu->index]; 1017 menu_info = &mapping->menu_info[query_menu->index];
1018 1018
1019 if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) { 1019 if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
1020 (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
1020 s32 bitmap; 1021 s32 bitmap;
1021 1022
1022 if (!ctrl->cached) { 1023 if (!ctrl->cached) {
@@ -1225,7 +1226,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
1225 /* Valid menu indices are reported by the GET_RES request for 1226 /* Valid menu indices are reported by the GET_RES request for
1226 * UVC controls that support it. 1227 * UVC controls that support it.
1227 */ 1228 */
1228 if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) { 1229 if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
1230 (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
1229 if (!ctrl->cached) { 1231 if (!ctrl->cached) {
1230 ret = uvc_ctrl_populate_cache(chain, ctrl); 1232 ret = uvc_ctrl_populate_cache(chain, ctrl);
1231 if (ret < 0) 1233 if (ret < 0)
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index f17f92b86a30..0f415dade05a 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -821,8 +821,8 @@ static void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
821 fill_event(&ev, ctrl, changes); 821 fill_event(&ev, ctrl, changes);
822 822
823 list_for_each_entry(sev, &ctrl->ev_subs, node) 823 list_for_each_entry(sev, &ctrl->ev_subs, node)
824 if (sev->fh && (sev->fh != fh || 824 if (sev->fh != fh ||
825 (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))) 825 (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))
826 v4l2_event_queue_fh(sev->fh, &ev); 826 v4l2_event_queue_fh(sev->fh, &ev);
827} 827}
828 828
@@ -947,6 +947,7 @@ static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
947 if (ctrl->cluster[0]->has_volatiles) 947 if (ctrl->cluster[0]->has_volatiles)
948 ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; 948 ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
949 } 949 }
950 fh = NULL;
950 } 951 }
951 if (changed || update_inactive) { 952 if (changed || update_inactive) {
952 /* If a control was changed that was not one of the controls 953 /* If a control was changed that was not one of the controls
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
index 46037f225529..c26ad9637143 100644
--- a/drivers/media/video/v4l2-event.c
+++ b/drivers/media/video/v4l2-event.c
@@ -216,6 +216,9 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
216 unsigned long flags; 216 unsigned long flags;
217 unsigned i; 217 unsigned i;
218 218
219 if (sub->type == V4L2_EVENT_ALL)
220 return -EINVAL;
221
219 if (elems < 1) 222 if (elems < 1)
220 elems = 1; 223 elems = 1;
221 if (sub->type == V4L2_EVENT_CTRL) { 224 if (sub->type == V4L2_EVENT_CTRL) {
@@ -283,6 +286,7 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
283{ 286{
284 struct v4l2_subscribed_event *sev; 287 struct v4l2_subscribed_event *sev;
285 unsigned long flags; 288 unsigned long flags;
289 int i;
286 290
287 if (sub->type == V4L2_EVENT_ALL) { 291 if (sub->type == V4L2_EVENT_ALL) {
288 v4l2_event_unsubscribe_all(fh); 292 v4l2_event_unsubscribe_all(fh);
@@ -293,8 +297,12 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
293 297
294 sev = v4l2_event_subscribed(fh, sub->type, sub->id); 298 sev = v4l2_event_subscribed(fh, sub->type, sub->id);
295 if (sev != NULL) { 299 if (sev != NULL) {
300 /* Remove any pending events for this subscription */
301 for (i = 0; i < sev->in_use; i++) {
302 list_del(&sev->events[sev_pos(sev, i)].list);
303 fh->navailable--;
304 }
296 list_del(&sev->list); 305 list_del(&sev->list);
297 sev->fh = NULL;
298 } 306 }
299 307
300 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 308 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 979e544388cb..95a3f5e82aef 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -131,6 +131,7 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)
131 continue; 131 continue;
132 132
133 for (plane = 0; plane < vb->num_planes; ++plane) { 133 for (plane = 0; plane < vb->num_planes; ++plane) {
134 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
134 vb->v4l2_planes[plane].m.mem_offset = off; 135 vb->v4l2_planes[plane].m.mem_offset = off;
135 136
136 dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", 137 dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
@@ -264,6 +265,7 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
264 q->num_buffers -= buffers; 265 q->num_buffers -= buffers;
265 if (!q->num_buffers) 266 if (!q->num_buffers)
266 q->memory = 0; 267 q->memory = 0;
268 INIT_LIST_HEAD(&q->queued_list);
267} 269}
268 270
269/** 271/**
@@ -296,14 +298,14 @@ static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
296{ 298{
297 unsigned int plane; 299 unsigned int plane;
298 for (plane = 0; plane < vb->num_planes; ++plane) { 300 for (plane = 0; plane < vb->num_planes; ++plane) {
301 void *mem_priv = vb->planes[plane].mem_priv;
299 /* 302 /*
300 * If num_users() has not been provided, call_memop 303 * If num_users() has not been provided, call_memop
301 * will return 0, apparently nobody cares about this 304 * will return 0, apparently nobody cares about this
302 * case anyway. If num_users() returns more than 1, 305 * case anyway. If num_users() returns more than 1,
303 * we are not the only user of the plane's memory. 306 * we are not the only user of the plane's memory.
304 */ 307 */
305 if (call_memop(q, plane, num_users, 308 if (mem_priv && call_memop(q, plane, num_users, mem_priv) > 1)
306 vb->planes[plane].mem_priv) > 1)
307 return true; 309 return true;
308 } 310 }
309 return false; 311 return false;
diff --git a/drivers/mfd/ab5500-core.c b/drivers/mfd/ab5500-core.c
index 4175544b491b..ec10629a0b0b 100644
--- a/drivers/mfd/ab5500-core.c
+++ b/drivers/mfd/ab5500-core.c
@@ -13,6 +13,7 @@
13 * TODO: Event handling with irq_chip. Waiting for PRCMU fw support. 13 * TODO: Event handling with irq_chip. Waiting for PRCMU fw support.
14 */ 14 */
15 15
16#include <linux/module.h>
16#include <linux/mutex.h> 17#include <linux/mutex.h>
17#include <linux/err.h> 18#include <linux/err.h>
18#include <linux/platform_device.h> 19#include <linux/platform_device.h>
diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c
index 6be1fe6b5f9a..43c0ebb81956 100644
--- a/drivers/mfd/ab5500-debugfs.c
+++ b/drivers/mfd/ab5500-debugfs.c
@@ -4,6 +4,7 @@
4 * Debugfs support for the AB5500 MFD driver 4 * Debugfs support for the AB5500 MFD driver
5 */ 5 */
6 6
7#include <linux/export.h>
7#include <linux/debugfs.h> 8#include <linux/debugfs.h>
8#include <linux/seq_file.h> 9#include <linux/seq_file.h>
9#include <linux/mfd/ab5500/ab5500.h> 10#include <linux/mfd/ab5500/ab5500.h>
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index ae57769ba50d..4b976f00ea85 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -32,6 +32,7 @@
32/* VENDOR SPEC register */ 32/* VENDOR SPEC register */
33#define SDHCI_VENDOR_SPEC 0xC0 33#define SDHCI_VENDOR_SPEC 0xC0
34#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 34#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
35#define SDHCI_WTMK_LVL 0x44
35#define SDHCI_MIX_CTRL 0x48 36#define SDHCI_MIX_CTRL 0x48
36 37
37/* 38/*
@@ -476,6 +477,13 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
476 if (is_imx53_esdhc(imx_data)) 477 if (is_imx53_esdhc(imx_data))
477 imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; 478 imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
478 479
480 /*
481 * The imx6q ROM code will change the default watermark level setting
482 * to something insane. Change it back here.
483 */
484 if (is_imx6q_usdhc(imx_data))
485 writel(0x08100810, host->ioaddr + SDHCI_WTMK_LVL);
486
479 boarddata = &imx_data->boarddata; 487 boarddata = &imx_data->boarddata;
480 if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) { 488 if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
481 if (!host->mmc->parent->platform_data) { 489 if (!host->mmc->parent->platform_data) {
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 66b616ebe536..318a869286ab 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -12,27 +12,17 @@ menuconfig MTD
12 12
13if MTD 13if MTD
14 14
15config MTD_DEBUG
16 bool "Debugging"
17 help
18 This turns on low-level debugging for the entire MTD sub-system.
19 Normally, you should say 'N'.
20
21config MTD_DEBUG_VERBOSE
22 int "Debugging verbosity (0 = quiet, 3 = noisy)"
23 depends on MTD_DEBUG
24 default "0"
25 help
26 Determines the verbosity level of the MTD debugging messages.
27
28config MTD_TESTS 15config MTD_TESTS
29 tristate "MTD tests support" 16 tristate "MTD tests support (DANGEROUS)"
30 depends on m 17 depends on m
31 help 18 help
32 This option includes various MTD tests into compilation. The tests 19 This option includes various MTD tests into compilation. The tests
33 should normally be compiled as kernel modules. The modules perform 20 should normally be compiled as kernel modules. The modules perform
34 various checks and verifications when loaded. 21 various checks and verifications when loaded.
35 22
23 WARNING: some of the tests will ERASE entire MTD device which they
24 test. Do not use these tests unless you really know what you do.
25
36config MTD_REDBOOT_PARTS 26config MTD_REDBOOT_PARTS
37 tristate "RedBoot partition table parsing" 27 tristate "RedBoot partition table parsing"
38 ---help--- 28 ---help---
@@ -137,7 +127,8 @@ config MTD_AFS_PARTS
137 'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example. 127 'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example.
138 128
139config MTD_OF_PARTS 129config MTD_OF_PARTS
140 def_bool y 130 tristate "OpenFirmware partitioning information support"
131 default Y
141 depends on OF 132 depends on OF
142 help 133 help
143 This provides a partition parsing function which derives 134 This provides a partition parsing function which derives
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 39664c4229ff..9aaac3ac89f3 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -5,8 +5,8 @@
5# Core functionality. 5# Core functionality.
6obj-$(CONFIG_MTD) += mtd.o 6obj-$(CONFIG_MTD) += mtd.o
7mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o 7mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o
8mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o
9 8
9obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
10obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o 10obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
11obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o 11obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
12obj-$(CONFIG_MTD_AFS_PARTS) += afs.o 12obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
index 302372c08b56..89a02f6f65dc 100644
--- a/drivers/mtd/afs.c
+++ b/drivers/mtd/afs.c
@@ -162,8 +162,8 @@ afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr)
162} 162}
163 163
164static int parse_afs_partitions(struct mtd_info *mtd, 164static int parse_afs_partitions(struct mtd_info *mtd,
165 struct mtd_partition **pparts, 165 struct mtd_partition **pparts,
166 unsigned long origin) 166 struct mtd_part_parser_data *data)
167{ 167{
168 struct mtd_partition *parts; 168 struct mtd_partition *parts;
169 u_int mask, off, idx, sz; 169 u_int mask, off, idx, sz;
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index 95949b97de6a..f40ea4547554 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -47,7 +47,7 @@ struct ar7_bin_rec {
47 47
48static int create_mtd_partitions(struct mtd_info *master, 48static int create_mtd_partitions(struct mtd_info *master,
49 struct mtd_partition **pparts, 49 struct mtd_partition **pparts,
50 unsigned long origin) 50 struct mtd_part_parser_data *data)
51{ 51{
52 struct ar7_bin_rec header; 52 struct ar7_bin_rec header;
53 unsigned int offset; 53 unsigned int offset;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 23175edd5634..8d70895a58d6 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -145,8 +145,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd)
145 if (((major << 8) | minor) < 0x3131) { 145 if (((major << 8) | minor) < 0x3131) {
146 /* CFI version 1.0 => don't trust bootloc */ 146 /* CFI version 1.0 => don't trust bootloc */
147 147
148 DEBUG(MTD_DEBUG_LEVEL1, 148 pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
149 "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
150 map->name, cfi->mfr, cfi->id); 149 map->name, cfi->mfr, cfi->id);
151 150
152 /* AFAICS all 29LV400 with a bottom boot block have a device ID 151 /* AFAICS all 29LV400 with a bottom boot block have a device ID
@@ -166,8 +165,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd)
166 * the 8-bit device ID. 165 * the 8-bit device ID.
167 */ 166 */
168 (cfi->mfr == CFI_MFR_MACRONIX)) { 167 (cfi->mfr == CFI_MFR_MACRONIX)) {
169 DEBUG(MTD_DEBUG_LEVEL1, 168 pr_debug("%s: Macronix MX29LV400C with bottom boot block"
170 "%s: Macronix MX29LV400C with bottom boot block"
171 " detected\n", map->name); 169 " detected\n", map->name);
172 extp->TopBottom = 2; /* bottom boot */ 170 extp->TopBottom = 2; /* bottom boot */
173 } else 171 } else
@@ -178,8 +176,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd)
178 extp->TopBottom = 2; /* bottom boot */ 176 extp->TopBottom = 2; /* bottom boot */
179 } 177 }
180 178
181 DEBUG(MTD_DEBUG_LEVEL1, 179 pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
182 "%s: AMD CFI PRI V%c.%c has no boot block field;"
183 " deduced %s from Device ID\n", map->name, major, minor, 180 " deduced %s from Device ID\n", map->name, major, minor,
184 extp->TopBottom == 2 ? "bottom" : "top"); 181 extp->TopBottom == 2 ? "bottom" : "top");
185 } 182 }
@@ -191,7 +188,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd)
191 struct map_info *map = mtd->priv; 188 struct map_info *map = mtd->priv;
192 struct cfi_private *cfi = map->fldrv_priv; 189 struct cfi_private *cfi = map->fldrv_priv;
193 if (cfi->cfiq->BufWriteTimeoutTyp) { 190 if (cfi->cfiq->BufWriteTimeoutTyp) {
194 DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" ); 191 pr_debug("Using buffer write method\n" );
195 mtd->write = cfi_amdstd_write_buffers; 192 mtd->write = cfi_amdstd_write_buffers;
196 } 193 }
197} 194}
@@ -443,8 +440,8 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
443 mtd->writesize = 1; 440 mtd->writesize = 1;
444 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 441 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
445 442
446 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n", 443 pr_debug("MTD %s(): write buffer size %d\n", __func__,
447 __func__, mtd->writebufsize); 444 mtd->writebufsize);
448 445
449 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 446 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
450 447
@@ -1163,7 +1160,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1163 return ret; 1160 return ret;
1164 } 1161 }
1165 1162
1166 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1163 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1167 __func__, adr, datum.x[0] ); 1164 __func__, adr, datum.x[0] );
1168 1165
1169 /* 1166 /*
@@ -1174,7 +1171,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1174 */ 1171 */
1175 oldd = map_read(map, adr); 1172 oldd = map_read(map, adr);
1176 if (map_word_equal(map, oldd, datum)) { 1173 if (map_word_equal(map, oldd, datum)) {
1177 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n", 1174 pr_debug("MTD %s(): NOP\n",
1178 __func__); 1175 __func__);
1179 goto op_done; 1176 goto op_done;
1180 } 1177 }
@@ -1400,7 +1397,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1400 1397
1401 datum = map_word_load(map, buf); 1398 datum = map_word_load(map, buf);
1402 1399
1403 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1400 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1404 __func__, adr, datum.x[0] ); 1401 __func__, adr, datum.x[0] );
1405 1402
1406 XIP_INVAL_CACHED_RANGE(map, adr, len); 1403 XIP_INVAL_CACHED_RANGE(map, adr, len);
@@ -1587,7 +1584,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1587 return ret; 1584 return ret;
1588 } 1585 }
1589 1586
1590 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", 1587 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
1591 __func__, chip->start ); 1588 __func__, chip->start );
1592 1589
1593 XIP_INVAL_CACHED_RANGE(map, adr, map->size); 1590 XIP_INVAL_CACHED_RANGE(map, adr, map->size);
@@ -1675,7 +1672,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1675 return ret; 1672 return ret;
1676 } 1673 }
1677 1674
1678 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", 1675 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
1679 __func__, adr ); 1676 __func__, adr );
1680 1677
1681 XIP_INVAL_CACHED_RANGE(map, adr, len); 1678 XIP_INVAL_CACHED_RANGE(map, adr, len);
@@ -1801,8 +1798,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
1801 goto out_unlock; 1798 goto out_unlock;
1802 chip->state = FL_LOCKING; 1799 chip->state = FL_LOCKING;
1803 1800
1804 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n", 1801 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
1805 __func__, adr, len);
1806 1802
1807 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1803 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1808 cfi->device_type, NULL); 1804 cfi->device_type, NULL);
@@ -1837,8 +1833,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
1837 goto out_unlock; 1833 goto out_unlock;
1838 chip->state = FL_UNLOCKING; 1834 chip->state = FL_UNLOCKING;
1839 1835
1840 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n", 1836 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
1841 __func__, adr, len);
1842 1837
1843 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1838 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1844 cfi->device_type, NULL); 1839 cfi->device_type, NULL);
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 5e3cc80128aa..89c6595454a5 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -34,8 +34,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
34 34
35 /* Refuse the operation if the we cannot look behind the chip */ 35 /* Refuse the operation if the we cannot look behind the chip */
36 if (chip->start < 0x400000) { 36 if (chip->start < 0x400000) {
37 DEBUG( MTD_DEBUG_LEVEL3, 37 pr_debug( "MTD %s(): chip->start: %lx wanted >= 0x400000\n",
38 "MTD %s(): chip->start: %lx wanted >= 0x400000\n",
39 __func__, chip->start ); 38 __func__, chip->start );
40 return -EIO; 39 return -EIO;
41 } 40 }
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index ea832ea0e4aa..c443f527a53a 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1914,11 +1914,10 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
1914 * (oh and incidentaly the jedec spec - 3.5.3.3) the reset 1914 * (oh and incidentaly the jedec spec - 3.5.3.3) the reset
1915 * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at 1915 * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at
1916 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips 1916 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips
1917 * as they will ignore the writes and dont care what address 1917 * as they will ignore the writes and don't care what address
1918 * the F0 is written to */ 1918 * the F0 is written to */
1919 if (cfi->addr_unlock1) { 1919 if (cfi->addr_unlock1) {
1920 DEBUG( MTD_DEBUG_LEVEL3, 1920 pr_debug( "reset unlock called %x %x \n",
1921 "reset unlock called %x %x \n",
1922 cfi->addr_unlock1,cfi->addr_unlock2); 1921 cfi->addr_unlock1,cfi->addr_unlock2);
1923 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 1922 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1924 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); 1923 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
@@ -1941,7 +1940,7 @@ static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int in
1941 uint8_t uaddr; 1940 uint8_t uaddr;
1942 1941
1943 if (!(jedec_table[index].devtypes & cfi->device_type)) { 1942 if (!(jedec_table[index].devtypes & cfi->device_type)) {
1944 DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n", 1943 pr_debug("Rejecting potential %s with incompatible %d-bit device type\n",
1945 jedec_table[index].name, 4 * (1<<cfi->device_type)); 1944 jedec_table[index].name, 4 * (1<<cfi->device_type));
1946 return 0; 1945 return 0;
1947 } 1946 }
@@ -2021,7 +2020,7 @@ static inline int jedec_match( uint32_t base,
2021 * there aren't. 2020 * there aren't.
2022 */ 2021 */
2023 if (finfo->dev_id > 0xff) { 2022 if (finfo->dev_id > 0xff) {
2024 DEBUG( MTD_DEBUG_LEVEL3, "%s(): ID is not 8bit\n", 2023 pr_debug("%s(): ID is not 8bit\n",
2025 __func__); 2024 __func__);
2026 goto match_done; 2025 goto match_done;
2027 } 2026 }
@@ -2045,12 +2044,10 @@ static inline int jedec_match( uint32_t base,
2045 } 2044 }
2046 2045
2047 /* the part size must fit in the memory window */ 2046 /* the part size must fit in the memory window */
2048 DEBUG( MTD_DEBUG_LEVEL3, 2047 pr_debug("MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
2049 "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
2050 __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) ); 2048 __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) );
2051 if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) { 2049 if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
2052 DEBUG( MTD_DEBUG_LEVEL3, 2050 pr_debug("MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
2053 "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
2054 __func__, finfo->mfr_id, finfo->dev_id, 2051 __func__, finfo->mfr_id, finfo->dev_id,
2055 1 << finfo->dev_size ); 2052 1 << finfo->dev_size );
2056 goto match_done; 2053 goto match_done;
@@ -2061,13 +2058,12 @@ static inline int jedec_match( uint32_t base,
2061 2058
2062 uaddr = finfo->uaddr; 2059 uaddr = finfo->uaddr;
2063 2060
2064 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n", 2061 pr_debug("MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
2065 __func__, cfi->addr_unlock1, cfi->addr_unlock2 ); 2062 __func__, cfi->addr_unlock1, cfi->addr_unlock2 );
2066 if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr 2063 if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
2067 && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 || 2064 && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 ||
2068 unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) { 2065 unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) {
2069 DEBUG( MTD_DEBUG_LEVEL3, 2066 pr_debug("MTD %s(): 0x%.4x 0x%.4x did not match\n",
2070 "MTD %s(): 0x%.4x 0x%.4x did not match\n",
2071 __func__, 2067 __func__,
2072 unlock_addrs[uaddr].addr1, 2068 unlock_addrs[uaddr].addr1,
2073 unlock_addrs[uaddr].addr2); 2069 unlock_addrs[uaddr].addr2);
@@ -2083,15 +2079,13 @@ static inline int jedec_match( uint32_t base,
2083 * FIXME - write a driver that takes all of the chip info as 2079 * FIXME - write a driver that takes all of the chip info as
2084 * module parameters, doesn't probe but forces a load. 2080 * module parameters, doesn't probe but forces a load.
2085 */ 2081 */
2086 DEBUG( MTD_DEBUG_LEVEL3, 2082 pr_debug("MTD %s(): check ID's disappear when not in ID mode\n",
2087 "MTD %s(): check ID's disappear when not in ID mode\n",
2088 __func__ ); 2083 __func__ );
2089 jedec_reset( base, map, cfi ); 2084 jedec_reset( base, map, cfi );
2090 mfr = jedec_read_mfr( map, base, cfi ); 2085 mfr = jedec_read_mfr( map, base, cfi );
2091 id = jedec_read_id( map, base, cfi ); 2086 id = jedec_read_id( map, base, cfi );
2092 if ( mfr == cfi->mfr && id == cfi->id ) { 2087 if ( mfr == cfi->mfr && id == cfi->id ) {
2093 DEBUG( MTD_DEBUG_LEVEL3, 2088 pr_debug("MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
2094 "MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
2095 "You might need to manually specify JEDEC parameters.\n", 2089 "You might need to manually specify JEDEC parameters.\n",
2096 __func__, cfi->mfr, cfi->id ); 2090 __func__, cfi->mfr, cfi->id );
2097 goto match_done; 2091 goto match_done;
@@ -2104,7 +2098,7 @@ static inline int jedec_match( uint32_t base,
2104 * Put the device back in ID mode - only need to do this if we 2098 * Put the device back in ID mode - only need to do this if we
2105 * were truly frobbing a real device. 2099 * were truly frobbing a real device.
2106 */ 2100 */
2107 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ ); 2101 pr_debug("MTD %s(): return to ID mode\n", __func__ );
2108 if (cfi->addr_unlock1) { 2102 if (cfi->addr_unlock1) {
2109 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 2103 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
2110 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); 2104 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
@@ -2167,13 +2161,11 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2167 2161
2168 cfi->mfr = jedec_read_mfr(map, base, cfi); 2162 cfi->mfr = jedec_read_mfr(map, base, cfi);
2169 cfi->id = jedec_read_id(map, base, cfi); 2163 cfi->id = jedec_read_id(map, base, cfi);
2170 DEBUG(MTD_DEBUG_LEVEL3, 2164 pr_debug("Search for id:(%02x %02x) interleave(%d) type(%d)\n",
2171 "Search for id:(%02x %02x) interleave(%d) type(%d)\n",
2172 cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type); 2165 cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
2173 for (i = 0; i < ARRAY_SIZE(jedec_table); i++) { 2166 for (i = 0; i < ARRAY_SIZE(jedec_table); i++) {
2174 if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) { 2167 if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
2175 DEBUG( MTD_DEBUG_LEVEL3, 2168 pr_debug("MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
2176 "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
2177 __func__, cfi->mfr, cfi->id, 2169 __func__, cfi->mfr, cfi->id,
2178 cfi->addr_unlock1, cfi->addr_unlock2 ); 2170 cfi->addr_unlock1, cfi->addr_unlock2 );
2179 if (!cfi_jedec_setup(map, cfi, i)) 2171 if (!cfi_jedec_setup(map, cfi, i))
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 8cf667da2408..ddf9ec6d9168 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -189,10 +189,7 @@ static struct mtd_partition * newpart(char *s,
189 extra_mem_size; 189 extra_mem_size;
190 parts = kzalloc(alloc_size, GFP_KERNEL); 190 parts = kzalloc(alloc_size, GFP_KERNEL);
191 if (!parts) 191 if (!parts)
192 {
193 printk(KERN_ERR ERRP "out of memory\n");
194 return NULL; 192 return NULL;
195 }
196 extra_mem = (unsigned char *)(parts + *num_parts); 193 extra_mem = (unsigned char *)(parts + *num_parts);
197 } 194 }
198 /* enter this partition (offset will be calculated later if it is zero at this point) */ 195 /* enter this partition (offset will be calculated later if it is zero at this point) */
@@ -317,8 +314,8 @@ static int mtdpart_setup_real(char *s)
317 * the first one in the chain if a NULL mtd_id is passed in. 314 * the first one in the chain if a NULL mtd_id is passed in.
318 */ 315 */
319static int parse_cmdline_partitions(struct mtd_info *master, 316static int parse_cmdline_partitions(struct mtd_info *master,
320 struct mtd_partition **pparts, 317 struct mtd_partition **pparts,
321 unsigned long origin) 318 struct mtd_part_parser_data *data)
322{ 319{
323 unsigned long offset; 320 unsigned long offset;
324 int i; 321 int i;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 35081ce77fbd..283d887f7825 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -249,6 +249,16 @@ config MTD_DOC2001PLUS
249 under "NAND Flash Device Drivers" (currently that driver does not 249 under "NAND Flash Device Drivers" (currently that driver does not
250 support all Millennium Plus devices). 250 support all Millennium Plus devices).
251 251
252config MTD_DOCG3
253 tristate "M-Systems Disk-On-Chip G3"
254 ---help---
255 This provides an MTD device driver for the M-Systems DiskOnChip
256 G3 devices.
257
258 The driver provides access to G3 DiskOnChip, distributed by
259 M-Systems and now Sandisk. The support is very experimental,
260 and doesn't give access to any write operations.
261
252config MTD_DOCPROBE 262config MTD_DOCPROBE
253 tristate 263 tristate
254 select MTD_DOCECC 264 select MTD_DOCECC
@@ -268,8 +278,7 @@ config MTD_DOCPROBE_ADVANCED
268config MTD_DOCPROBE_ADDRESS 278config MTD_DOCPROBE_ADDRESS
269 hex "Physical address of DiskOnChip" if MTD_DOCPROBE_ADVANCED 279 hex "Physical address of DiskOnChip" if MTD_DOCPROBE_ADVANCED
270 depends on MTD_DOCPROBE 280 depends on MTD_DOCPROBE
271 default "0x0000" if MTD_DOCPROBE_ADVANCED 281 default "0x0"
272 default "0" if !MTD_DOCPROBE_ADVANCED
273 ---help--- 282 ---help---
274 By default, the probe for DiskOnChip devices will look for a 283 By default, the probe for DiskOnChip devices will look for a
275 DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000. 284 DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index f3226b1d38fc..56c7cd462f11 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -5,6 +5,7 @@
5obj-$(CONFIG_MTD_DOC2000) += doc2000.o 5obj-$(CONFIG_MTD_DOC2000) += doc2000.o
6obj-$(CONFIG_MTD_DOC2001) += doc2001.o 6obj-$(CONFIG_MTD_DOC2001) += doc2001.o
7obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o 7obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o
8obj-$(CONFIG_MTD_DOCG3) += docg3.o
8obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o 9obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o
9obj-$(CONFIG_MTD_DOCECC) += docecc.o 10obj-$(CONFIG_MTD_DOCECC) += docecc.o
10obj-$(CONFIG_MTD_SLRAM) += slram.o 11obj-$(CONFIG_MTD_SLRAM) += slram.o
@@ -17,3 +18,5 @@ obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
17obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o 18obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
18obj-$(CONFIG_MTD_M25P80) += m25p80.o 19obj-$(CONFIG_MTD_M25P80) += m25p80.o
19obj-$(CONFIG_MTD_SST25L) += sst25l.o 20obj-$(CONFIG_MTD_SST25L) += sst25l.o
21
22CFLAGS_docg3.o += -I$(src) \ No newline at end of file
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index f7fbf6025ef2..e9fad9151219 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -82,8 +82,7 @@ static int _DoC_WaitReady(struct DiskOnChip *doc)
82 void __iomem *docptr = doc->virtadr; 82 void __iomem *docptr = doc->virtadr;
83 unsigned long timeo = jiffies + (HZ * 10); 83 unsigned long timeo = jiffies + (HZ * 10);
84 84
85 DEBUG(MTD_DEBUG_LEVEL3, 85 pr_debug("_DoC_WaitReady called for out-of-line wait\n");
86 "_DoC_WaitReady called for out-of-line wait\n");
87 86
88 /* Out-of-line routine to wait for chip response */ 87 /* Out-of-line routine to wait for chip response */
89 while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { 88 while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
@@ -92,7 +91,7 @@ static int _DoC_WaitReady(struct DiskOnChip *doc)
92 DoC_Delay(doc, 2); 91 DoC_Delay(doc, 2);
93 92
94 if (time_after(jiffies, timeo)) { 93 if (time_after(jiffies, timeo)) {
95 DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n"); 94 pr_debug("_DoC_WaitReady timed out.\n");
96 return -EIO; 95 return -EIO;
97 } 96 }
98 udelay(1); 97 udelay(1);
@@ -323,8 +322,7 @@ static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
323 322
324 /* Reset the chip */ 323 /* Reset the chip */
325 if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) { 324 if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) {
326 DEBUG(MTD_DEBUG_LEVEL2, 325 pr_debug("DoC_Command (reset) for %d,%d returned true\n",
327 "DoC_Command (reset) for %d,%d returned true\n",
328 floor, chip); 326 floor, chip);
329 return 0; 327 return 0;
330 } 328 }
@@ -332,8 +330,7 @@ static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
332 330
333 /* Read the NAND chip ID: 1. Send ReadID command */ 331 /* Read the NAND chip ID: 1. Send ReadID command */
334 if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) { 332 if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) {
335 DEBUG(MTD_DEBUG_LEVEL2, 333 pr_debug("DoC_Command (ReadID) for %d,%d returned true\n",
336 "DoC_Command (ReadID) for %d,%d returned true\n",
337 floor, chip); 334 floor, chip);
338 return 0; 335 return 0;
339 } 336 }
@@ -699,7 +696,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
699#ifdef ECC_DEBUG 696#ifdef ECC_DEBUG
700 printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from); 697 printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
701#endif 698#endif
702 /* Read the ECC syndrom through the DiskOnChip ECC 699 /* Read the ECC syndrome through the DiskOnChip ECC
703 logic. These syndrome will be all ZERO when there 700 logic. These syndrome will be all ZERO when there
704 is no error */ 701 is no error */
705 for (i = 0; i < 6; i++) { 702 for (i = 0; i < 6; i++) {
@@ -930,7 +927,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
930 uint8_t *buf = ops->oobbuf; 927 uint8_t *buf = ops->oobbuf;
931 size_t len = ops->len; 928 size_t len = ops->len;
932 929
933 BUG_ON(ops->mode != MTD_OOB_PLACE); 930 BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
934 931
935 ofs += ops->ooboffs; 932 ofs += ops->ooboffs;
936 933
@@ -1094,7 +1091,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
1094 struct DiskOnChip *this = mtd->priv; 1091 struct DiskOnChip *this = mtd->priv;
1095 int ret; 1092 int ret;
1096 1093
1097 BUG_ON(ops->mode != MTD_OOB_PLACE); 1094 BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
1098 1095
1099 mutex_lock(&this->lock); 1096 mutex_lock(&this->lock);
1100 ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len, 1097 ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len,
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 241192f05bc8..a3f7a27499be 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -55,15 +55,14 @@ static int _DoC_WaitReady(void __iomem * docptr)
55{ 55{
56 unsigned short c = 0xffff; 56 unsigned short c = 0xffff;
57 57
58 DEBUG(MTD_DEBUG_LEVEL3, 58 pr_debug("_DoC_WaitReady called for out-of-line wait\n");
59 "_DoC_WaitReady called for out-of-line wait\n");
60 59
61 /* Out-of-line routine to wait for chip response */ 60 /* Out-of-line routine to wait for chip response */
62 while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c) 61 while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c)
63 ; 62 ;
64 63
65 if (c == 0) 64 if (c == 0)
66 DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n"); 65 pr_debug("_DoC_WaitReady timed out.\n");
67 66
68 return (c == 0); 67 return (c == 0);
69} 68}
@@ -464,7 +463,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
464#ifdef ECC_DEBUG 463#ifdef ECC_DEBUG
465 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); 464 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
466#endif 465#endif
467 /* Read the ECC syndrom through the DiskOnChip ECC logic. 466 /* Read the ECC syndrome through the DiskOnChip ECC logic.
468 These syndrome will be all ZERO when there is no error */ 467 These syndrome will be all ZERO when there is no error */
469 for (i = 0; i < 6; i++) { 468 for (i = 0; i < 6; i++) {
470 syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i); 469 syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i);
@@ -632,7 +631,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
632 uint8_t *buf = ops->oobbuf; 631 uint8_t *buf = ops->oobbuf;
633 size_t len = ops->len; 632 size_t len = ops->len;
634 633
635 BUG_ON(ops->mode != MTD_OOB_PLACE); 634 BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
636 635
637 ofs += ops->ooboffs; 636 ofs += ops->ooboffs;
638 637
@@ -690,7 +689,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
690 uint8_t *buf = ops->oobbuf; 689 uint8_t *buf = ops->oobbuf;
691 size_t len = ops->len; 690 size_t len = ops->len;
692 691
693 BUG_ON(ops->mode != MTD_OOB_PLACE); 692 BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
694 693
695 ofs += ops->ooboffs; 694 ofs += ops->ooboffs;
696 695
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 09ae0adc3ad0..99351bc3e0ed 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -61,15 +61,14 @@ static int _DoC_WaitReady(void __iomem * docptr)
61{ 61{
62 unsigned int c = 0xffff; 62 unsigned int c = 0xffff;
63 63
64 DEBUG(MTD_DEBUG_LEVEL3, 64 pr_debug("_DoC_WaitReady called for out-of-line wait\n");
65 "_DoC_WaitReady called for out-of-line wait\n");
66 65
67 /* Out-of-line routine to wait for chip response */ 66 /* Out-of-line routine to wait for chip response */
68 while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c) 67 while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c)
69 ; 68 ;
70 69
71 if (c == 0) 70 if (c == 0)
72 DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n"); 71 pr_debug("_DoC_WaitReady timed out.\n");
73 72
74 return (c == 0); 73 return (c == 0);
75} 74}
@@ -655,7 +654,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
655#ifdef ECC_DEBUG 654#ifdef ECC_DEBUG
656 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); 655 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
657#endif 656#endif
658 /* Read the ECC syndrom through the DiskOnChip ECC logic. 657 /* Read the ECC syndrome through the DiskOnChip ECC logic.
659 These syndrome will be all ZERO when there is no error */ 658 These syndrome will be all ZERO when there is no error */
660 for (i = 0; i < 6; i++) 659 for (i = 0; i < 6; i++)
661 syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i); 660 syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
@@ -835,7 +834,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
835 uint8_t *buf = ops->oobbuf; 834 uint8_t *buf = ops->oobbuf;
836 size_t len = ops->len; 835 size_t len = ops->len;
837 836
838 BUG_ON(ops->mode != MTD_OOB_PLACE); 837 BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
839 838
840 ofs += ops->ooboffs; 839 ofs += ops->ooboffs;
841 840
@@ -920,7 +919,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
920 uint8_t *buf = ops->oobbuf; 919 uint8_t *buf = ops->oobbuf;
921 size_t len = ops->len; 920 size_t len = ops->len;
922 921
923 BUG_ON(ops->mode != MTD_OOB_PLACE); 922 BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
924 923
925 ofs += ops->ooboffs; 924 ofs += ops->ooboffs;
926 925
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
index 37ef29a73ee4..4a1c39b6f37d 100644
--- a/drivers/mtd/devices/docecc.c
+++ b/drivers/mtd/devices/docecc.c
@@ -2,7 +2,7 @@
2 * ECC algorithm for M-systems disk on chip. We use the excellent Reed 2 * ECC algorithm for M-systems disk on chip. We use the excellent Reed
3 * Solmon code of Phil Karn (karn@ka9q.ampr.org) available under the 3 * Solmon code of Phil Karn (karn@ka9q.ampr.org) available under the
4 * GNU GPL License. The rest is simply to convert the disk on chip 4 * GNU GPL License. The rest is simply to convert the disk on chip
5 * syndrom into a standard syndom. 5 * syndrome into a standard syndome.
6 * 6 *
7 * Author: Fabrice Bellard (fabrice.bellard@netgem.com) 7 * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
8 * Copyright (C) 2000 Netgem S.A. 8 * Copyright (C) 2000 Netgem S.A.
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
new file mode 100644
index 000000000000..bdcf5df982e8
--- /dev/null
+++ b/drivers/mtd/devices/docg3.c
@@ -0,0 +1,1114 @@
1/*
2 * Handles the M-Systems DiskOnChip G3 chip
3 *
4 * Copyright (C) 2011 Robert Jarzmik
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/errno.h>
25#include <linux/platform_device.h>
26#include <linux/string.h>
27#include <linux/slab.h>
28#include <linux/io.h>
29#include <linux/delay.h>
30#include <linux/mtd/mtd.h>
31#include <linux/mtd/partitions.h>
32
33#include <linux/debugfs.h>
34#include <linux/seq_file.h>
35
36#define CREATE_TRACE_POINTS
37#include "docg3.h"
38
39/*
40 * This driver handles the DiskOnChip G3 flash memory.
41 *
42 * As no specification is available from M-Systems/Sandisk, this drivers lacks
43 * several functions available on the chip, as :
44 * - block erase
45 * - page write
46 * - IPL write
47 * - ECC fixing (lack of BCH algorith understanding)
48 * - powerdown / powerup
49 *
50 * The bus data width (8bits versus 16bits) is not handled (if_cfg flag), and
51 * the driver assumes a 16bits data bus.
52 *
53 * DocG3 relies on 2 ECC algorithms, which are handled in hardware :
54 * - a 1 byte Hamming code stored in the OOB for each page
55 * - a 7 bytes BCH code stored in the OOB for each page
56 * The BCH part is only used for check purpose, no correction is available as
57 * some information is missing. What is known is that :
58 * - BCH is in GF(2^14)
59 * - BCH is over data of 520 bytes (512 page + 7 page_info bytes
60 * + 1 hamming byte)
61 * - BCH can correct up to 4 bits (t = 4)
62 * - BCH syndroms are calculated in hardware, and checked in hardware as well
63 *
64 */
65
66static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
67{
68 u8 val = readb(docg3->base + reg);
69
70 trace_docg3_io(0, 8, reg, (int)val);
71 return val;
72}
73
74static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
75{
76 u16 val = readw(docg3->base + reg);
77
78 trace_docg3_io(0, 16, reg, (int)val);
79 return val;
80}
81
82static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
83{
84 writeb(val, docg3->base + reg);
85 trace_docg3_io(1, 16, reg, val);
86}
87
88static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
89{
90 writew(val, docg3->base + reg);
91 trace_docg3_io(1, 16, reg, val);
92}
93
94static inline void doc_flash_command(struct docg3 *docg3, u8 cmd)
95{
96 doc_writeb(docg3, cmd, DOC_FLASHCOMMAND);
97}
98
99static inline void doc_flash_sequence(struct docg3 *docg3, u8 seq)
100{
101 doc_writeb(docg3, seq, DOC_FLASHSEQUENCE);
102}
103
104static inline void doc_flash_address(struct docg3 *docg3, u8 addr)
105{
106 doc_writeb(docg3, addr, DOC_FLASHADDRESS);
107}
108
109static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
110
111static int doc_register_readb(struct docg3 *docg3, int reg)
112{
113 u8 val;
114
115 doc_writew(docg3, reg, DOC_READADDRESS);
116 val = doc_readb(docg3, reg);
117 doc_vdbg("Read register %04x : %02x\n", reg, val);
118 return val;
119}
120
121static int doc_register_readw(struct docg3 *docg3, int reg)
122{
123 u16 val;
124
125 doc_writew(docg3, reg, DOC_READADDRESS);
126 val = doc_readw(docg3, reg);
127 doc_vdbg("Read register %04x : %04x\n", reg, val);
128 return val;
129}
130
131/**
132 * doc_delay - delay docg3 operations
133 * @docg3: the device
134 * @nbNOPs: the number of NOPs to issue
135 *
136 * As no specification is available, the right timings between chip commands are
137 * unknown. The only available piece of information are the observed nops on a
138 * working docg3 chip.
139 * Therefore, doc_delay relies on a busy loop of NOPs, instead of scheduler
140 * friendlier msleep() functions or blocking mdelay().
141 */
142static void doc_delay(struct docg3 *docg3, int nbNOPs)
143{
144 int i;
145
146 doc_dbg("NOP x %d\n", nbNOPs);
147 for (i = 0; i < nbNOPs; i++)
148 doc_writeb(docg3, 0, DOC_NOP);
149}
150
151static int is_prot_seq_error(struct docg3 *docg3)
152{
153 int ctrl;
154
155 ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
156 return ctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR);
157}
158
159static int doc_is_ready(struct docg3 *docg3)
160{
161 int ctrl;
162
163 ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
164 return ctrl & DOC_CTRL_FLASHREADY;
165}
166
167static int doc_wait_ready(struct docg3 *docg3)
168{
169 int maxWaitCycles = 100;
170
171 do {
172 doc_delay(docg3, 4);
173 cpu_relax();
174 } while (!doc_is_ready(docg3) && maxWaitCycles--);
175 doc_delay(docg3, 2);
176 if (maxWaitCycles > 0)
177 return 0;
178 else
179 return -EIO;
180}
181
182static int doc_reset_seq(struct docg3 *docg3)
183{
184 int ret;
185
186 doc_writeb(docg3, 0x10, DOC_FLASHCONTROL);
187 doc_flash_sequence(docg3, DOC_SEQ_RESET);
188 doc_flash_command(docg3, DOC_CMD_RESET);
189 doc_delay(docg3, 2);
190 ret = doc_wait_ready(docg3);
191
192 doc_dbg("doc_reset_seq() -> isReady=%s\n", ret ? "false" : "true");
193 return ret;
194}
195
196/**
197 * doc_read_data_area - Read data from data area
198 * @docg3: the device
199 * @buf: the buffer to fill in
200 * @len: the lenght to read
201 * @first: first time read, DOC_READADDRESS should be set
202 *
203 * Reads bytes from flash data. Handles the single byte / even bytes reads.
204 */
205static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
206 int first)
207{
208 int i, cdr, len4;
209 u16 data16, *dst16;
210 u8 data8, *dst8;
211
212 doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
213 cdr = len & 0x3;
214 len4 = len - cdr;
215
216 if (first)
217 doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
218 dst16 = buf;
219 for (i = 0; i < len4; i += 2) {
220 data16 = doc_readw(docg3, DOC_IOSPACE_DATA);
221 *dst16 = data16;
222 dst16++;
223 }
224
225 if (cdr) {
226 doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
227 DOC_READADDRESS);
228 doc_delay(docg3, 1);
229 dst8 = (u8 *)dst16;
230 for (i = 0; i < cdr; i++) {
231 data8 = doc_readb(docg3, DOC_IOSPACE_DATA);
232 *dst8 = data8;
233 dst8++;
234 }
235 }
236}
237
238/**
239 * doc_set_data_mode - Sets the flash to reliable data mode
240 * @docg3: the device
241 *
242 * The reliable data mode is a bit slower than the fast mode, but less errors
243 * occur. Entering the reliable mode cannot be done without entering the fast
244 * mode first.
245 */
246static void doc_set_reliable_mode(struct docg3 *docg3)
247{
248 doc_dbg("doc_set_reliable_mode()\n");
249 doc_flash_sequence(docg3, DOC_SEQ_SET_MODE);
250 doc_flash_command(docg3, DOC_CMD_FAST_MODE);
251 doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
252 doc_delay(docg3, 2);
253}
254
255/**
256 * doc_set_asic_mode - Set the ASIC mode
257 * @docg3: the device
258 * @mode: the mode
259 *
260 * The ASIC can work in 3 modes :
261 *  - RESET: all registers are zeroed
262 *  - NORMAL: receives and handles commands
263 *  - POWERDOWN: minimal poweruse, flash parts shut off
264 */
265static void doc_set_asic_mode(struct docg3 *docg3, u8 mode)
266{
267	int i;
268
	/* NOTE(review): 12 dummy IPL reads before switching mode — presumably
	 * required by the ASIC to settle; confirm against the chip datasheet */
269	for (i = 0; i < 12; i++)
270		doc_readb(docg3, DOC_IOSPACE_IPL);
271
272	mode |= DOC_ASICMODE_MDWREN;
273	doc_dbg("doc_set_asic_mode(%02x)\n", mode);
274	doc_writeb(docg3, mode, DOC_ASICMODE);
	/* confirm the mode by writing its complement to the CONFIRM register */
275	doc_writeb(docg3, ~mode, DOC_ASICMODECONFIRM);
276	doc_delay(docg3, 1);
277}
278
279/**
280 * doc_set_device_id - Sets the devices id for cascaded G3 chips
281 * @docg3: the device
282 * @id: the chip to select (amongst 0, 1, 2, 3)
283 *
284 * There can be 4 cascaded G3 chips. This function selects the one which will
285 * be the active one.
286 */
287static void doc_set_device_id(struct docg3 *docg3, int id)
288{
289	u8 ctrl;
290
291	doc_dbg("doc_set_device_id(%d)\n", id);
292	doc_writeb(docg3, id, DOC_DEVICESELECT);
293	ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
294
	/* clear any pending protocol violation and assert chip-enable */
295	ctrl &= ~DOC_CTRL_VIOLATION;
296	ctrl |= DOC_CTRL_CE;
297	doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
298}
299
300/**
301 * doc_set_extra_page_mode - Change flash page layout
302 * @docg3: the device
303 *
304 * Normally, the flash page is split into the data (512 bytes) and the out of
305 * band data (16 bytes). For each, 4 more bytes can be accessed, where the wear
306 * leveling counters are stored. To access this last area of 4 bytes, a special
307 * mode must be input to the flash ASIC.
308 *
309 * Returns 0 if no error occurred, -EIO else.
310 */
311static int doc_set_extra_page_mode(struct docg3 *docg3)
312{
313	int fctrl;
314
315	doc_dbg("doc_set_extra_page_mode()\n");
316	doc_flash_sequence(docg3, DOC_SEQ_PAGE_SIZE_532);
317	doc_flash_command(docg3, DOC_CMD_PAGE_SIZE_532);
318	doc_delay(docg3, 2);
319
	/* a protection or sequence error means the mode switch was refused */
320	fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
321	if (fctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR))
322		return -EIO;
323	else
324		return 0;
325}
326
327/**
328 * doc_read_seek - Set both flash planes to the specified block, page for reading
329 * @docg3: the device
330 * @block0: the first plane block index
331 * @block1: the second plane block index
332 * @page: the page index within the block
333 * @wear: if true, read will occur on the 4 extra bytes of the wear area
334 * @ofs: offset in page to read
335 *
336 * Programs the flash even and odd planes to the specific block and page.
337 * Alternatively, programs the flash to the wear area of the specified page.
 *
 * Returns 0 if successful, -EIO if the wear area mode could not be entered.
338 */
339static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page,
340			 int wear, int ofs)
341{
342	int sector, ret = 0;
343
344	doc_dbg("doc_seek(blocks=(%d,%d), page=%d, ofs=%d, wear=%d)\n",
345		block0, block1, page, ofs, wear);
346
	/* offsets within the first page+OOB pair go through plane 1,
	 * everything else (and wear reads) through plane 2 */
347	if (!wear && (ofs < 2 * DOC_LAYOUT_PAGE_SIZE)) {
348		doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
349		doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
350		doc_delay(docg3, 2);
351	} else {
352		doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
353		doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
354		doc_delay(docg3, 2);
355	}
356
357	doc_set_reliable_mode(docg3);
358	if (wear)
359		ret = doc_set_extra_page_mode(docg3);
360	if (ret)
361		goto out;
362
	/* program the first plane's block/page address, 8 bits at a time */
363	sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
364	doc_flash_sequence(docg3, DOC_SEQ_READ);
365	doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
366	doc_delay(docg3, 1);
367	doc_flash_address(docg3, sector & 0xff);
368	doc_flash_address(docg3, (sector >> 8) & 0xff);
369	doc_flash_address(docg3, (sector >> 16) & 0xff);
370	doc_delay(docg3, 1);
371
	/* same for the second plane */
372	sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
373	doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
374	doc_delay(docg3, 1);
375	doc_flash_address(docg3, sector & 0xff);
376	doc_flash_address(docg3, (sector >> 8) & 0xff);
377	doc_flash_address(docg3, (sector >> 16) & 0xff);
378	doc_delay(docg3, 2);
379
380out:
381	return ret;
382}
383
384/**
385 * doc_read_page_ecc_init - Initialize hardware ECC engine
386 * @docg3: the device
387 * @len: the number of bytes covered by the ECC (BCH covered)
388 *
389 * The function initializes the hardware ECC engine to compute the Hamming
390 * ECC (on 1 byte) and the BCH syndromes (on 7 bytes).
391 *
392 * Return 0 if succeeded, -EIO on error
393 */
394static int doc_read_page_ecc_init(struct docg3 *docg3, int len)
395{
396	doc_writew(docg3, DOC_ECCCONF0_READ_MODE
397		   | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
398		   | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
399		   DOC_ECCCONF0);
400	doc_delay(docg3, 4);
	/* dummy FLASHCONTROL read before polling the ready bit */
401	doc_register_readb(docg3, DOC_FLASHCONTROL);
402	return doc_wait_ready(docg3);
403}
404
405/**
406 * doc_read_page_prepare - Prepares reading data from a flash page
407 * @docg3: the device
408 * @block0: the first plane block index on flash memory
409 * @block1: the second plane block index on flash memory
410 * @page: the page index in the block
411 * @offset: the offset in the page (must be a multiple of 4)
412 *
413 * Prepares the page to be read in the flash memory :
414 *   - tell ASIC to map the flash pages
415 *   - tell ASIC to be in read mode
416 *
417 * After a call to this method, a call to doc_read_page_finish is mandatory,
418 * to end the read cycle of the flash.
419 *
420 * Read data from a flash page. The length to be read must be between 0 and
421 * (page_size + oob_size + wear_size), ie. 532, and a multiple of 4 (because
422 * the extra bytes reading is not implemented).
423 *
424 * As pages are grouped by 2 (in 2 planes), reading from a page must be done
425 * in two steps:
426 *  - one read of 512 bytes at offset 0
427 *  - one read of 512 bytes at offset 512 + 16
428 *
429 * Returns 0 if successful, -EIO if a read error occurred.
430 */
431static int doc_read_page_prepare(struct docg3 *docg3, int block0, int block1,
432				 int page, int offset)
433{
434	int wear_area = 0, ret = 0;
435
436	doc_dbg("doc_read_page_prepare(blocks=(%d,%d), page=%d, ofsInPage=%d)\n",
437		block0, block1, page, offset);
438	if (offset >= DOC_LAYOUT_WEAR_OFFSET)
439		wear_area = 1;
440	if (!wear_area && offset > (DOC_LAYOUT_PAGE_OOB_SIZE * 2))
441		return -EINVAL;
442
443	doc_set_device_id(docg3, docg3->device_id);
444	ret = doc_reset_seq(docg3);
445	if (ret)
446		goto err;
447
448	/* Program the flash address block and page */
449	ret = doc_read_seek(docg3, block0, block1, page, wear_area, offset);
450	if (ret)
451		goto err;
452
453	doc_flash_command(docg3, DOC_CMD_READ_ALL_PLANES);
454	doc_delay(docg3, 2);
455	doc_wait_ready(docg3);
456
457	doc_flash_command(docg3, DOC_CMD_SET_ADDR_READ);
458	doc_delay(docg3, 1);
	/* offsets past the first page+OOB pair are relative to plane 2 */
459	if (offset >= DOC_LAYOUT_PAGE_SIZE * 2)
460		offset -= 2 * DOC_LAYOUT_PAGE_SIZE;
	/* the flash address is expressed in 4-byte units (offset >> 2) */
461	doc_flash_address(docg3, offset >> 2);
462	doc_delay(docg3, 1);
463	doc_wait_ready(docg3);
464
465	doc_flash_command(docg3, DOC_CMD_READ_FLASH);
466
467	return 0;
468err:
	/* terminate the aborted read cycle before reporting the error */
469	doc_writeb(docg3, 0, DOC_DATAEND);
470	doc_delay(docg3, 2);
471	return -EIO;
472}
473
474/**
475 * doc_read_page_getbytes - Reads bytes from a prepared page
476 * @docg3: the device
477 * @len: the number of bytes to be read (must be a multiple of 4)
478 * @buf: the buffer to be filled in
479 * @first: 1 if first time read, DOC_READADDRESS should be set
480 *
 * Returns the number of bytes read (always @len).
481 */
482static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
483				  int first)
484{
485	doc_read_data_area(docg3, buf, len, first);
486	doc_delay(docg3, 2);
487	return len;
488}
489
490/**
491 * doc_get_hw_bch_syndroms - Get hardware calculated BCH syndromes
492 * @docg3: the device
493 * @syns: the array of 7 integers where the syndromes will be stored
 *
 * Reads each of the DOC_ECC_BCH_SIZE syndrome bytes from its own register.
494 */
495static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns)
496{
497	int i;
498
499	for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
500		syns[i] = doc_register_readb(docg3, DOC_BCH_SYNDROM(i));
501}
502
503/**
504 * doc_read_page_finish - Ends reading of a flash page
505 * @docg3: the device
506 *
507 * As a side effect, resets the chip selector to 0. This ensures that after each
508 * read operation, the floor 0 is selected. Therefore, if the systems halts, the
509 * reboot will boot on floor 0, where the IPL is.
510 */
511static void doc_read_page_finish(struct docg3 *docg3)
512{
	/* signal end-of-data to close the read cycle */
513	doc_writeb(docg3, 0, DOC_DATAEND);
514	doc_delay(docg3, 2);
515	doc_set_device_id(docg3, 0);
516}
517
518/**
519 * calc_block_sector - Calculate blocks, pages and ofs.
520 *
521 * @from: offset in flash
522 * @block0: first plane block index calculated
523 * @block1: second plane block index calculated
524 * @page: page calculated
525 * @ofs: offset in page
526 */
527static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
528			      int *ofs)
529{
530	uint sector;
531
	/* sectors are 512-byte units, interleaved over the two planes */
532	sector = from / DOC_LAYOUT_PAGE_SIZE;
533	*block0 = sector / (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES)
534		* DOC_LAYOUT_NBPLANES;
535	*block1 = *block0 + 1;
536	*page = sector % (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES);
537	*page /= DOC_LAYOUT_NBPLANES;
	/* NOTE(review): odd sectors appear to start past the even sector's
	 * page+OOB area (528 bytes) — derived from the read path, confirm */
538	if (sector % 2)
539		*ofs = DOC_LAYOUT_PAGE_OOB_SIZE;
540	else
541		*ofs = 0;
542}
543
544/**
545 * doc_read - Read bytes from flash
546 * @mtd: the device
547 * @from: the offset from first block and first page, in bytes, aligned on page
548 * size
549 * @len: the number of bytes to read (must be a multiple of 4)
550 * @retlen: the number of bytes actually read
551 * @buf: the filled in buffer
552 *
553 * Reads flash memory pages. This function does not read the OOB chunk, but only
554 * the page data.
555 *
556 * Returns 0 if read successfull, of -EIO, -EINVAL if an error occured
557 */
558static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
559 size_t *retlen, u_char *buf)
560{
561 struct docg3 *docg3 = mtd->priv;
562 int block0, block1, page, readlen, ret, ofs = 0;
563 int syn[DOC_ECC_BCH_SIZE], eccconf1;
564 u8 oob[DOC_LAYOUT_OOB_SIZE];
565
566 ret = -EINVAL;
567 doc_dbg("doc_read(from=%lld, len=%zu, buf=%p)\n", from, len, buf);
568 if (from % DOC_LAYOUT_PAGE_SIZE)
569 goto err;
570 if (len % 4)
571 goto err;
572 calc_block_sector(from, &block0, &block1, &page, &ofs);
573 if (block1 > docg3->max_block)
574 goto err;
575
576 *retlen = 0;
577 ret = 0;
578 readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
579 while (!ret && len > 0) {
580 readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
581 ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
582 if (ret < 0)
583 goto err;
584 ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_COVERED_BYTES);
585 if (ret < 0)
586 goto err_in_read;
587 ret = doc_read_page_getbytes(docg3, readlen, buf, 1);
588 if (ret < readlen)
589 goto err_in_read;
590 ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
591 oob, 0);
592 if (ret < DOC_LAYOUT_OOB_SIZE)
593 goto err_in_read;
594
595 *retlen += readlen;
596 buf += readlen;
597 len -= readlen;
598
599 ofs ^= DOC_LAYOUT_PAGE_OOB_SIZE;
600 if (ofs == 0)
601 page += 2;
602 if (page > DOC_ADDR_PAGE_MASK) {
603 page = 0;
604 block0 += 2;
605 block1 += 2;
606 }
607
608 /*
609 * There should be a BCH bitstream fixing algorithm here ...
610 * By now, a page read failure is triggered by BCH error
611 */
612 doc_get_hw_bch_syndroms(docg3, syn);
613 eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
614
615 doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
616 oob[0], oob[1], oob[2], oob[3], oob[4],
617 oob[5], oob[6]);
618 doc_dbg("OOB - HAMMING: %02x\n", oob[7]);
619 doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
620 oob[8], oob[9], oob[10], oob[11], oob[12],
621 oob[13], oob[14]);
622 doc_dbg("OOB - UNUSED: %02x\n", oob[15]);
623 doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
624 doc_dbg("ECC BCH syndrom: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
625 syn[0], syn[1], syn[2], syn[3], syn[4], syn[5], syn[6]);
626
627 ret = -EBADMSG;
628 if (block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) {
629 if (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR)
630 goto err_in_read;
631 if (is_prot_seq_error(docg3))
632 goto err_in_read;
633 }
634 doc_read_page_finish(docg3);
635 }
636
637 return 0;
638err_in_read:
639 doc_read_page_finish(docg3);
640err:
641 return ret;
642}
643
644/**
645 * doc_read_oob - Read out of band bytes from flash
646 * @mtd: the device
647 * @from: the offset from first block and first page, in bytes, aligned on page
648 *        size
649 * @ops: the mtd oob structure
650 *
651 * Reads flash memory OOB area of pages.
652 *
653 * Returns 0 if read successful, or -EIO, -EINVAL if an error occurred
654 */
655static int doc_read_oob(struct mtd_info *mtd, loff_t from,
656			struct mtd_oob_ops *ops)
657{
658	struct docg3 *docg3 = mtd->priv;
659	int block0, block1, page, ofs, ret;
660	u8 *buf = ops->oobbuf;
661	size_t len = ops->ooblen;
662
663	doc_dbg("doc_read_oob(from=%lld, buf=%p, len=%zu)\n", from, buf, len);
664	if (len != DOC_LAYOUT_OOB_SIZE)
665		return -EINVAL;
666
	/* only MTD_OPS_PLACE_OOB honours the caller-supplied OOB offset */
667	switch (ops->mode) {
668	case MTD_OPS_PLACE_OOB:
669		buf += ops->ooboffs;
670		break;
671	default:
672		break;
673	}
674
675	calc_block_sector(from, &block0, &block1, &page, &ofs);
676	if (block1 > docg3->max_block)
677		return -EINVAL;
678
	/* the OOB area sits right after the page data, hence + PAGE_SIZE */
679	ret = doc_read_page_prepare(docg3, block0, block1, page,
680				    ofs + DOC_LAYOUT_PAGE_SIZE);
681	if (!ret)
682		ret = doc_read_page_ecc_init(docg3, DOC_LAYOUT_OOB_SIZE);
683	if (!ret)
684		ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
685					     buf, 1);
686	doc_read_page_finish(docg3);
687
688	if (ret > 0)
689		ops->oobretlen = ret;
690	else
691		ops->oobretlen = 0;
692	return (ret > 0) ? 0 : ret;
693}
694
/**
 * doc_reload_bbt - Reload the bad block table cache from flash
 * @docg3: the device
 *
 * Reads the BBT pages (block DOC_LAYOUT_BLOCK_BBT, starting at page
 * DOC_LAYOUT_PAGE_BBT) into the docg3->bbt RAM cache.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
695static int doc_reload_bbt(struct docg3 *docg3)
696{
697	int block = DOC_LAYOUT_BLOCK_BBT;
698	int ret = 0, nbpages, page;
699	u_char *buf = docg3->bbt;
700
	/* one bit per block => 8 * PAGE_SIZE blocks covered per BBT page */
701	nbpages = DIV_ROUND_UP(docg3->max_block + 1, 8 * DOC_LAYOUT_PAGE_SIZE);
702	for (page = 0; !ret && (page < nbpages); page++) {
703		ret = doc_read_page_prepare(docg3, block, block + 1,
704					    page + DOC_LAYOUT_PAGE_BBT, 0);
705		if (!ret)
706			ret = doc_read_page_ecc_init(docg3,
707						     DOC_LAYOUT_PAGE_SIZE);
708		if (!ret)
			/* NOTE(review): byte count returned here is ignored —
			 * a short read would go undetected; confirm intent */
709			doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
710					       buf, 1);
711		buf += DOC_LAYOUT_PAGE_SIZE;
712	}
713	doc_read_page_finish(docg3);
714	return ret;
715}
716
717/**
718 * doc_block_isbad - Checks whether a block is good or not
719 * @mtd: the device
720 * @from: the offset to find the correct block
721 *
722 * Returns 1 if block is bad, 0 if block is good, -EINVAL if the block is
 * beyond the device's last block.
723 */
724static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
725{
726	struct docg3 *docg3 = mtd->priv;
727	int block0, block1, page, ofs, is_good;
728
729	calc_block_sector(from, &block0, &block1, &page, &ofs);
730	doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n",
731		from, block0, block1, page, ofs);
732
	/* the reserved area (IPL, OTP, BBT) is always reported good */
733	if (block0 < DOC_LAYOUT_BLOCK_FIRST_DATA)
734		return 0;
735	if (block1 > docg3->max_block)
736		return -EINVAL;
737
	/* a set bit in the bbt cache marks a good block */
738	is_good = docg3->bbt[block0 >> 3] & (1 << (block0 & 0x7));
739	return !is_good;
740}
741
742/**
743 * doc_get_erase_count - Get block erase count
744 * @docg3: the device
745 * @from: the offset in which the block is.
746 *
747 * Get the number of times a block was erased. The number is the maximum of
748 * erase times between first and second plane (which should be equal normally).
749 *
750 * Returns The number of erases, or -EINVAL or -EIO on error.
751 */
752static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
753{
754	u8 buf[DOC_LAYOUT_WEAR_SIZE];
755	int ret, plane1_erase_count, plane2_erase_count;
756	int block0, block1, page, ofs;
757
758	doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf);
759	if (from % DOC_LAYOUT_PAGE_SIZE)
760		return -EINVAL;
761	calc_block_sector(from, &block0, &block1, &page, &ofs);
762	if (block1 > docg3->max_block)
763		return -EINVAL;
764
765	ret = doc_reset_seq(docg3);
766	if (!ret)
767		ret = doc_read_page_prepare(docg3, block0, block1, page,
768					    ofs + DOC_LAYOUT_WEAR_OFFSET);
769	if (!ret)
770		ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
771					     buf, 1);
772	doc_read_page_finish(docg3);
773
	/* bytes 0 and 2 must both carry the erase mark for valid wear data */
774	if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
775		return -EIO;
	/* counters are stored bit-inverted; NOTE(review): per-plane byte
	 * layout (1,4,5) and (3,6,7) is as coded — confirm with datasheet */
776	plane1_erase_count = (u8)(~buf[1]) | ((u8)(~buf[4]) << 8)
777		| ((u8)(~buf[5]) << 16);
778	plane2_erase_count = (u8)(~buf[3]) | ((u8)(~buf[6]) << 8)
779		| ((u8)(~buf[7]) << 16);
780
781	return max(plane1_erase_count, plane2_erase_count);
782}
783
784/*
785 * Debug sysfs entries
786 */
/* debugfs: dump the FLASHCONTROL register, decoded bit by bit */
787static int dbg_flashctrl_show(struct seq_file *s, void *p)
788{
789	struct docg3 *docg3 = (struct docg3 *)s->private;
790
791	int pos = 0;
792	u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
793
794	pos += seq_printf(s,
795		 "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
796		 fctrl,
797		 fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
798		 fctrl & DOC_CTRL_CE ? "active" : "inactive",
799		 fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
800		 fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
801		 fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
802	return pos;
803}
804DEBUGFS_RO_ATTR(flashcontrol, dbg_flashctrl_show);
805
/* debugfs: dump the ASICMODE register and decode the 2-bit power mode */
806static int dbg_asicmode_show(struct seq_file *s, void *p)
807{
808	struct docg3 *docg3 = (struct docg3 *)s->private;
809
810	int pos = 0;
811	int pctrl = doc_register_readb(docg3, DOC_ASICMODE);
812	int mode = pctrl & 0x03;
813
814	pos += seq_printf(s,
815			 "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
816			 pctrl,
817			 pctrl & DOC_ASICMODE_RAM_WE ? 1 : 0,
818			 pctrl & DOC_ASICMODE_RSTIN_RESET ? 1 : 0,
819			 pctrl & DOC_ASICMODE_BDETCT_RESET ? 1 : 0,
820			 pctrl & DOC_ASICMODE_MDWREN ? 1 : 0,
821			 pctrl & DOC_ASICMODE_POWERDOWN ? 1 : 0,
822			 mode >> 1, mode & 0x1);
823
824	switch (mode) {
825	case DOC_ASICMODE_RESET:
826		pos += seq_printf(s, "reset");
827		break;
828	case DOC_ASICMODE_NORMAL:
829		pos += seq_printf(s, "normal");
830		break;
831	case DOC_ASICMODE_POWERDOWN:
832		pos += seq_printf(s, "powerdown");
833		break;
834	}
835	pos += seq_printf(s, ")\n");
836	return pos;
837}
838DEBUGFS_RO_ATTR(asic_mode, dbg_asicmode_show);
839
/* debugfs: dump the currently selected cascaded device id */
840static int dbg_device_id_show(struct seq_file *s, void *p)
841{
842	struct docg3 *docg3 = (struct docg3 *)s->private;
843	int pos = 0;
844	int id = doc_register_readb(docg3, DOC_DEVICESELECT);
845
846	pos += seq_printf(s, "DeviceId = %d\n", id);
847	return pos;
848}
849DEBUGFS_RO_ATTR(device_id, dbg_device_id_show);
850
/* debugfs: dump the protection register and both DPS status/address windows */
851static int dbg_protection_show(struct seq_file *s, void *p)
852{
853	struct docg3 *docg3 = (struct docg3 *)s->private;
854	int pos = 0;
855	int protect = doc_register_readb(docg3, DOC_PROTECTION);
856	int dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
857	int dps0_low = doc_register_readb(docg3, DOC_DPS0_ADDRLOW);
858	int dps0_high = doc_register_readb(docg3, DOC_DPS0_ADDRHIGH);
859	int dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
860	int dps1_low = doc_register_readb(docg3, DOC_DPS1_ADDRLOW);
861	int dps1_high = doc_register_readb(docg3, DOC_DPS1_ADDRHIGH);
862
863	pos += seq_printf(s, "Protection = 0x%02x (",
864			 protect);
865	if (protect & DOC_PROTECT_FOUNDRY_OTP_LOCK)
866		pos += seq_printf(s, "FOUNDRY_OTP_LOCK,");
867	if (protect & DOC_PROTECT_CUSTOMER_OTP_LOCK)
868		pos += seq_printf(s, "CUSTOMER_OTP_LOCK,");
869	if (protect & DOC_PROTECT_LOCK_INPUT)
870		pos += seq_printf(s, "LOCK_INPUT,");
871	if (protect & DOC_PROTECT_STICKY_LOCK)
872		pos += seq_printf(s, "STICKY_LOCK,");
873	if (protect & DOC_PROTECT_PROTECTION_ENABLED)
874		pos += seq_printf(s, "PROTECTION ON,");
875	if (protect & DOC_PROTECT_IPL_DOWNLOAD_LOCK)
876		pos += seq_printf(s, "IPL_DOWNLOAD_LOCK,");
877	if (protect & DOC_PROTECT_PROTECTION_ERROR)
878		pos += seq_printf(s, "PROTECT_ERR,");
879	else
880		pos += seq_printf(s, "NO_PROTECT_ERR");
881	pos += seq_printf(s, ")\n");
882
883	pos += seq_printf(s, "DPS0 = 0x%02x : "
884			 "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
885			 "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
886			 dps0, dps0_low, dps0_high,
887			 !!(dps0 & DOC_DPS_OTP_PROTECTED),
888			 !!(dps0 & DOC_DPS_READ_PROTECTED),
889			 !!(dps0 & DOC_DPS_WRITE_PROTECTED),
890			 !!(dps0 & DOC_DPS_HW_LOCK_ENABLED),
891			 !!(dps0 & DOC_DPS_KEY_OK));
892	pos += seq_printf(s, "DPS1 = 0x%02x : "
893			 "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
894			 "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
895			 dps1, dps1_low, dps1_high,
896			 !!(dps1 & DOC_DPS_OTP_PROTECTED),
897			 !!(dps1 & DOC_DPS_READ_PROTECTED),
898			 !!(dps1 & DOC_DPS_WRITE_PROTECTED),
899			 !!(dps1 & DOC_DPS_HW_LOCK_ENABLED),
900			 !!(dps1 & DOC_DPS_KEY_OK));
901	return pos;
902}
903DEBUGFS_RO_ATTR(protection, dbg_protection_show);
904
/*
 * doc_dbg_register - Create the debugfs entries under a "docg3" directory.
 * Each file is only attempted if the previous creation succeeded; any single
 * failure tears down the whole directory and returns -ENOMEM.
 */
905static int __init doc_dbg_register(struct docg3 *docg3)
906{
907	struct dentry *root, *entry;
908
909	root = debugfs_create_dir("docg3", NULL);
910	if (!root)
911		return -ENOMEM;
912
913	entry = debugfs_create_file("flashcontrol", S_IRUSR, root, docg3,
914				  &flashcontrol_fops);
915	if (entry)
916		entry = debugfs_create_file("asic_mode", S_IRUSR, root,
917					  docg3, &asic_mode_fops);
918	if (entry)
919		entry = debugfs_create_file("device_id", S_IRUSR, root,
920					  docg3, &device_id_fops);
921	if (entry)
922		entry = debugfs_create_file("protection", S_IRUSR, root,
923					  docg3, &protection_fops);
924	if (entry) {
925		docg3->debugfs_root = root;
926		return 0;
927	} else {
928		debugfs_remove_recursive(root);
929		return -ENOMEM;
930	}
931}
932
/* remove the whole debugfs subtree created by doc_dbg_register */
933static void __exit doc_dbg_unregister(struct docg3 *docg3)
934{
935	debugfs_remove_recursive(docg3->debugfs_root);
936}
937
938/**
939 * doc_set_driver_info - Fill the mtd_info structure and docg3 structure
940 * @chip_id: The chip ID of the supported chip
941 * @mtd: The structure to fill
 *
 * NOTE(review): only DOC_CHIPID_G3 is handled here; for any other id,
 * max_block stays 0. Callers (docg3_probe) validate chip_id beforehand.
942 */
943static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
944{
945	struct docg3 *docg3 = mtd->priv;
946	int cfg;
947
948	cfg = doc_register_readb(docg3, DOC_CONFIGURATION);
949	docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0);
950
951	switch (chip_id) {
952	case DOC_CHIPID_G3:
953		mtd->name = "DiskOnChip G3";
954		docg3->max_block = 2047;
955		break;
956	}
957	mtd->type = MTD_NANDFLASH;
958	/*
959	 * Once write methods are added, the correct flags will be set.
960	 * mtd->flags = MTD_CAP_NANDFLASH;
961	 */
962	mtd->flags = MTD_CAP_ROM;
963	mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE;
964	mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
965	mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
966	mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
967	mtd->owner = THIS_MODULE;
	/* read-only driver for now: all write/erase hooks left NULL */
968	mtd->erase = NULL;
969	mtd->point = NULL;
970	mtd->unpoint = NULL;
971	mtd->read = doc_read;
972	mtd->write = NULL;
973	mtd->read_oob = doc_read_oob;
974	mtd->write_oob = NULL;
975	mtd->sync = NULL;
976	mtd->block_isbad = doc_block_isbad;
977}
978
979/**
980 * doc_probe - Probe the IO space for a DiskOnChip G3 chip
981 * @pdev: platform device
982 *
983 * Probes for a G3 chip at the specified IO space in the platform data
984 * ressources.
985 *
986 * Returns 0 on success, -ENOMEM, -ENXIO on error
987 */
988static int __init docg3_probe(struct platform_device *pdev)
989{
990 struct device *dev = &pdev->dev;
991 struct docg3 *docg3;
992 struct mtd_info *mtd;
993 struct resource *ress;
994 int ret, bbt_nbpages;
995 u16 chip_id, chip_id_inv;
996
997 ret = -ENOMEM;
998 docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL);
999 if (!docg3)
1000 goto nomem1;
1001 mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
1002 if (!mtd)
1003 goto nomem2;
1004 mtd->priv = docg3;
1005
1006 ret = -ENXIO;
1007 ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1008 if (!ress) {
1009 dev_err(dev, "No I/O memory resource defined\n");
1010 goto noress;
1011 }
1012 docg3->base = ioremap(ress->start, DOC_IOSPACE_SIZE);
1013
1014 docg3->dev = &pdev->dev;
1015 docg3->device_id = 0;
1016 doc_set_device_id(docg3, docg3->device_id);
1017 doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
1018 doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL);
1019
1020 chip_id = doc_register_readw(docg3, DOC_CHIPID);
1021 chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV);
1022
1023 ret = -ENODEV;
1024 if (chip_id != (u16)(~chip_id_inv)) {
1025 doc_info("No device found at IO addr %p\n",
1026 (void *)ress->start);
1027 goto nochipfound;
1028 }
1029
1030 switch (chip_id) {
1031 case DOC_CHIPID_G3:
1032 doc_info("Found a G3 DiskOnChip at addr %p\n",
1033 (void *)ress->start);
1034 break;
1035 default:
1036 doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
1037 goto nochipfound;
1038 }
1039
1040 doc_set_driver_info(chip_id, mtd);
1041 platform_set_drvdata(pdev, mtd);
1042
1043 ret = -ENOMEM;
1044 bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
1045 8 * DOC_LAYOUT_PAGE_SIZE);
1046 docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
1047 if (!docg3->bbt)
1048 goto nochipfound;
1049 doc_reload_bbt(docg3);
1050
1051 ret = mtd_device_parse_register(mtd, part_probes,
1052 NULL, NULL, 0);
1053 if (ret)
1054 goto register_error;
1055
1056 doc_dbg_register(docg3);
1057 return 0;
1058
1059register_error:
1060 kfree(docg3->bbt);
1061nochipfound:
1062 iounmap(docg3->base);
1063noress:
1064 kfree(mtd);
1065nomem2:
1066 kfree(docg3);
1067nomem1:
1068 return ret;
1069}
1070
1071/**
1072 * docg3_release - Release the driver
1073 * @pdev: the platform device
1074 *
 * Frees everything allocated by docg3_probe, in reverse order.
 *
1075 * Returns 0
1076 */
1077static int __exit docg3_release(struct platform_device *pdev)
1078{
1079	struct mtd_info *mtd = platform_get_drvdata(pdev);
1080	struct docg3 *docg3 = mtd->priv;
1081
1082	doc_dbg_unregister(docg3);
1083	mtd_device_unregister(mtd);
1084	iounmap(docg3->base);
1085	kfree(docg3->bbt);
1086	kfree(docg3);
1087	kfree(mtd);
1088	return 0;
1089}
1090
1091static struct platform_driver g3_driver = {
1092	.driver		= {
1093		.name	= "docg3",
1094		.owner	= THIS_MODULE,
1095	},
	/* no .probe member: docg3_probe is __init and is passed to
	 * platform_driver_probe() in docg3_init() instead */
1096	.remove		= __exit_p(docg3_release),
1097};
1098
1099static int __init docg3_init(void)
1100{
1101	return platform_driver_probe(&g3_driver, docg3_probe);
1102}
1103module_init(docg3_init);
1104
1105
1106static void __exit docg3_exit(void)
1107{
1108	platform_driver_unregister(&g3_driver);
1109}
1110module_exit(docg3_exit);
1111
1112MODULE_LICENSE("GPL");
1113MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
1114MODULE_DESCRIPTION("MTD driver for DiskOnChip G3");
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
new file mode 100644
index 000000000000..0d407be24594
--- /dev/null
+++ b/drivers/mtd/devices/docg3.h
@@ -0,0 +1,297 @@
1/*
2 * Handles the M-Systems DiskOnChip G3 chip
3 *
4 * Copyright (C) 2011 Robert Jarzmik
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#ifndef _MTD_DOCG3_H
23#define _MTD_DOCG3_H
24
25/*
26 * Flash memory areas :
27 * - 0x0000 .. 0x07ff : IPL
28 * - 0x0800 .. 0x0fff : Data area
29 * - 0x1000 .. 0x17ff : Registers
30 * - 0x1800 .. 0x1fff : Unknown
31 */
32#define DOC_IOSPACE_IPL 0x0000
33#define DOC_IOSPACE_DATA 0x0800
34#define DOC_IOSPACE_SIZE 0x2000
35
36/*
 37 * DOC G3 layout and addressing scheme
38 * A page address for the block "b", plane "P" and page "p":
39 * address = [bbbb bPpp pppp]
40 */
41
42#define DOC_ADDR_PAGE_MASK 0x3f
43#define DOC_ADDR_BLOCK_SHIFT 6
44#define DOC_LAYOUT_NBPLANES 2
45#define DOC_LAYOUT_PAGES_PER_BLOCK 64
46#define DOC_LAYOUT_PAGE_SIZE 512
47#define DOC_LAYOUT_OOB_SIZE 16
48#define DOC_LAYOUT_WEAR_SIZE 8
49#define DOC_LAYOUT_PAGE_OOB_SIZE \
50 (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_SIZE)
51#define DOC_LAYOUT_WEAR_OFFSET (DOC_LAYOUT_PAGE_OOB_SIZE * 2)
52#define DOC_LAYOUT_BLOCK_SIZE \
53 (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE)
54#define DOC_ECC_BCH_SIZE 7
55#define DOC_ECC_BCH_COVERED_BYTES \
56 (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ + \
57 DOC_LAYOUT_OOB_HAMMING_SZ + DOC_LAYOUT_OOB_BCH_SZ)
58
59/*
60 * Blocks distribution
61 */
62#define DOC_LAYOUT_BLOCK_BBT 0
63#define DOC_LAYOUT_BLOCK_OTP 0
64#define DOC_LAYOUT_BLOCK_FIRST_DATA 6
65
66#define DOC_LAYOUT_PAGE_BBT 4
67
68/*
69 * Extra page OOB (16 bytes wide) layout
70 */
71#define DOC_LAYOUT_OOB_PAGEINFO_OFS 0
72#define DOC_LAYOUT_OOB_HAMMING_OFS 7
73#define DOC_LAYOUT_OOB_BCH_OFS 8
74#define DOC_LAYOUT_OOB_UNUSED_OFS 15
75#define DOC_LAYOUT_OOB_PAGEINFO_SZ 7
76#define DOC_LAYOUT_OOB_HAMMING_SZ 1
77#define DOC_LAYOUT_OOB_BCH_SZ 7
78#define DOC_LAYOUT_OOB_UNUSED_SZ 1
79
80
81#define DOC_CHIPID_G3 0x200
82#define DOC_ERASE_MARK 0xaa
83/*
84 * Flash registers
85 */
86#define DOC_CHIPID 0x1000
87#define DOC_TEST 0x1004
88#define DOC_BUSLOCK 0x1006
89#define DOC_ENDIANCONTROL 0x1008
90#define DOC_DEVICESELECT 0x100a
91#define DOC_ASICMODE 0x100c
92#define DOC_CONFIGURATION 0x100e
93#define DOC_INTERRUPTCONTROL 0x1010
94#define DOC_READADDRESS 0x101a
95#define DOC_DATAEND 0x101e
96#define DOC_INTERRUPTSTATUS 0x1020
97
98#define DOC_FLASHSEQUENCE 0x1032
99#define DOC_FLASHCOMMAND 0x1034
100#define DOC_FLASHADDRESS 0x1036
101#define DOC_FLASHCONTROL 0x1038
102#define DOC_NOP 0x103e
103
104#define DOC_ECCCONF0 0x1040
105#define DOC_ECCCONF1 0x1042
106#define DOC_ECCPRESET 0x1044
107#define DOC_HAMMINGPARITY 0x1046
108#define DOC_BCH_SYNDROM(idx) (0x1048 + (idx << 1))
109
110#define DOC_PROTECTION 0x1056
111#define DOC_DPS0_ADDRLOW 0x1060
112#define DOC_DPS0_ADDRHIGH 0x1062
113#define DOC_DPS1_ADDRLOW 0x1064
114#define DOC_DPS1_ADDRHIGH 0x1066
115#define DOC_DPS0_STATUS 0x106c
116#define DOC_DPS1_STATUS 0x106e
117
118#define DOC_ASICMODECONFIRM 0x1072
119#define DOC_CHIPID_INV 0x1074
120
121/*
122 * Flash sequences
123 * A sequence is preset before one or more commands are input to the chip.
124 */
125#define DOC_SEQ_RESET 0x00
126#define DOC_SEQ_PAGE_SIZE_532 0x03
127#define DOC_SEQ_SET_MODE 0x09
128#define DOC_SEQ_READ 0x12
129#define DOC_SEQ_SET_PLANE1 0x0e
130#define DOC_SEQ_SET_PLANE2 0x10
131#define DOC_SEQ_PAGE_SETUP 0x1d
132
133/*
134 * Flash commands
135 */
136#define DOC_CMD_READ_PLANE1 0x00
137#define DOC_CMD_SET_ADDR_READ 0x05
138#define DOC_CMD_READ_ALL_PLANES 0x30
139#define DOC_CMD_READ_PLANE2 0x50
140#define DOC_CMD_READ_FLASH 0xe0
141#define DOC_CMD_PAGE_SIZE_532 0x3c
142
143#define DOC_CMD_PROG_BLOCK_ADDR 0x60
144#define DOC_CMD_PROG_CYCLE1 0x80
145#define DOC_CMD_PROG_CYCLE2 0x10
146#define DOC_CMD_ERASECYCLE2 0xd0
147
148#define DOC_CMD_RELIABLE_MODE 0x22
149#define DOC_CMD_FAST_MODE 0xa2
150
151#define DOC_CMD_RESET 0xff
152
153/*
154 * Flash register : DOC_FLASHCONTROL
155 */
156#define DOC_CTRL_VIOLATION 0x20
157#define DOC_CTRL_CE 0x10
158#define DOC_CTRL_UNKNOWN_BITS 0x08
159#define DOC_CTRL_PROTECTION_ERROR 0x04
160#define DOC_CTRL_SEQUENCE_ERROR 0x02
161#define DOC_CTRL_FLASHREADY 0x01
162
163/*
164 * Flash register : DOC_ASICMODE
165 */
166#define DOC_ASICMODE_RESET 0x00
167#define DOC_ASICMODE_NORMAL 0x01
168#define DOC_ASICMODE_POWERDOWN 0x02
169#define DOC_ASICMODE_MDWREN 0x04
170#define DOC_ASICMODE_BDETCT_RESET 0x08
171#define DOC_ASICMODE_RSTIN_RESET 0x10
172#define DOC_ASICMODE_RAM_WE 0x20
173
174/*
175 * Flash register : DOC_ECCCONF0
176 */
177#define DOC_ECCCONF0_READ_MODE 0x8000
178#define DOC_ECCCONF0_AUTO_ECC_ENABLE 0x4000
179#define DOC_ECCCONF0_HAMMING_ENABLE 0x1000
180#define DOC_ECCCONF0_BCH_ENABLE 0x0800
181#define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff
182
183/*
184 * Flash register : DOC_ECCCONF1
185 */
186#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
187#define DOC_ECCCONF1_UNKOWN1 0x40
188#define DOC_ECCCONF1_UNKOWN2 0x20
189#define DOC_ECCCONF1_UNKOWN3 0x10
190#define DOC_ECCCONF1_HAMMING_BITS_MASK 0x0f
191
192/*
193 * Flash register : DOC_PROTECTION
194 */
195#define DOC_PROTECT_FOUNDRY_OTP_LOCK 0x01
196#define DOC_PROTECT_CUSTOMER_OTP_LOCK 0x02
197#define DOC_PROTECT_LOCK_INPUT 0x04
198#define DOC_PROTECT_STICKY_LOCK 0x08
199#define DOC_PROTECT_PROTECTION_ENABLED 0x10
200#define DOC_PROTECT_IPL_DOWNLOAD_LOCK 0x20
201#define DOC_PROTECT_PROTECTION_ERROR 0x80
202
203/*
204 * Flash register : DOC_DPS0_STATUS and DOC_DPS1_STATUS
205 */
206#define DOC_DPS_OTP_PROTECTED 0x01
207#define DOC_DPS_READ_PROTECTED 0x02
208#define DOC_DPS_WRITE_PROTECTED 0x04
209#define DOC_DPS_HW_LOCK_ENABLED 0x08
210#define DOC_DPS_KEY_OK 0x80
211
212/*
213 * Flash register : DOC_CONFIGURATION
214 */
215#define DOC_CONF_IF_CFG 0x80
216#define DOC_CONF_MAX_ID_MASK 0x30
217#define DOC_CONF_VCCQ_3V 0x01
218
219/*
220 * Flash register : DOC_READADDRESS
221 */
222#define DOC_READADDR_INC 0x8000
223#define DOC_READADDR_ONE_BYTE 0x4000
224#define DOC_READADDR_ADDR_MASK 0x1fff
225
226/**
227 * struct docg3 - DiskOnChip driver private data
228 * @dev: the device currently under control
229 * @base: mapped IO space
230 * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
231 * @if_cfg: if true, reads are on 16bits, else reads are on 8bits
 * @max_block: highest usable block index on the flash
232 * @bbt: bad block table cache
233 * @debugfs_root: debugfs root node
234 */
235struct docg3 {
236	struct device *dev;
237	void __iomem *base;
238	unsigned int device_id:4;
239	unsigned int if_cfg:1;
240	int max_block;
241	u8 *bbt;
242	struct dentry *debugfs_root;
243};
244
245#define doc_err(fmt, arg...) dev_err(docg3->dev, (fmt), ## arg)
246#define doc_info(fmt, arg...) dev_info(docg3->dev, (fmt), ## arg)
247#define doc_dbg(fmt, arg...) dev_dbg(docg3->dev, (fmt), ## arg)
248#define doc_vdbg(fmt, arg...) dev_vdbg(docg3->dev, (fmt), ## arg)
249
250#define DEBUGFS_RO_ATTR(name, show_fct) \
251 static int name##_open(struct inode *inode, struct file *file) \
252 { return single_open(file, show_fct, inode->i_private); } \
253 static const struct file_operations name##_fops = { \
254 .owner = THIS_MODULE, \
255 .open = name##_open, \
256 .llseek = seq_lseek, \
257 .read = seq_read, \
258 .release = single_release \
259 };
260#endif
261
262/*
263 * Trace events part
264 */
265#undef TRACE_SYSTEM
266#define TRACE_SYSTEM docg3
267
268#if !defined(_MTD_DOCG3_TRACE) || defined(TRACE_HEADER_MULTI_READ)
269#define _MTD_DOCG3_TRACE
270
271#include <linux/tracepoint.h>
272
273TRACE_EVENT(docg3_io,
274 TP_PROTO(int op, int width, u16 reg, int val),
275 TP_ARGS(op, width, reg, val),
276 TP_STRUCT__entry(
277 __field(int, op)
278 __field(unsigned char, width)
279 __field(u16, reg)
280 __field(int, val)),
281 TP_fast_assign(
282 __entry->op = op;
283 __entry->width = width;
284 __entry->reg = reg;
285 __entry->val = val;),
286 TP_printk("docg3: %s%02d reg=%04x, val=%04x",
287 __entry->op ? "write" : "read", __entry->width,
288 __entry->reg, __entry->val)
289 );
290#endif
291
292/* This part must be outside protection */
293#undef TRACE_INCLUDE_PATH
294#undef TRACE_INCLUDE_FILE
295#define TRACE_INCLUDE_PATH .
296#define TRACE_INCLUDE_FILE docg3
297#include <trace/define_trace.h>
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index d374603493a7..45116bb30297 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -50,11 +50,6 @@
50#include <linux/mtd/nand.h> 50#include <linux/mtd/nand.h>
51#include <linux/mtd/doc2000.h> 51#include <linux/mtd/doc2000.h>
52 52
53/* Where to look for the devices? */
54#ifndef CONFIG_MTD_DOCPROBE_ADDRESS
55#define CONFIG_MTD_DOCPROBE_ADDRESS 0
56#endif
57
58 53
59static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS; 54static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS;
60module_param(doc_config_location, ulong, 0); 55module_param(doc_config_location, ulong, 0);
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 772a0ff89e0f..3a11ea628e58 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -34,9 +34,6 @@
34/* debugging */ 34/* debugging */
35//#define LART_DEBUG 35//#define LART_DEBUG
36 36
37/* partition support */
38#define HAVE_PARTITIONS
39
40#include <linux/kernel.h> 37#include <linux/kernel.h>
41#include <linux/module.h> 38#include <linux/module.h>
42#include <linux/types.h> 39#include <linux/types.h>
@@ -44,9 +41,7 @@
44#include <linux/errno.h> 41#include <linux/errno.h>
45#include <linux/string.h> 42#include <linux/string.h>
46#include <linux/mtd/mtd.h> 43#include <linux/mtd/mtd.h>
47#ifdef HAVE_PARTITIONS
48#include <linux/mtd/partitions.h> 44#include <linux/mtd/partitions.h>
49#endif
50 45
51#ifndef CONFIG_SA1100_LART 46#ifndef CONFIG_SA1100_LART
52#error This is for LART architecture only 47#error This is for LART architecture only
@@ -598,7 +593,6 @@ static struct mtd_erase_region_info erase_regions[] = {
598 } 593 }
599}; 594};
600 595
601#ifdef HAVE_PARTITIONS
602static struct mtd_partition lart_partitions[] = { 596static struct mtd_partition lart_partitions[] = {
603 /* blob */ 597 /* blob */
604 { 598 {
@@ -619,7 +613,7 @@ static struct mtd_partition lart_partitions[] = {
619 .size = INITRD_LEN, /* MTDPART_SIZ_FULL */ 613 .size = INITRD_LEN, /* MTDPART_SIZ_FULL */
620 } 614 }
621}; 615};
622#endif 616#define NUM_PARTITIONS ARRAY_SIZE(lart_partitions)
623 617
624static int __init lart_flash_init (void) 618static int __init lart_flash_init (void)
625{ 619{
@@ -668,7 +662,6 @@ static int __init lart_flash_init (void)
668 result,mtd.eraseregions[result].erasesize,mtd.eraseregions[result].erasesize / 1024, 662 result,mtd.eraseregions[result].erasesize,mtd.eraseregions[result].erasesize / 1024,
669 result,mtd.eraseregions[result].numblocks); 663 result,mtd.eraseregions[result].numblocks);
670 664
671#ifdef HAVE_PARTITIONS
672 printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions)); 665 printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions));
673 666
674 for (result = 0; result < ARRAY_SIZE(lart_partitions); result++) 667 for (result = 0; result < ARRAY_SIZE(lart_partitions); result++)
@@ -681,25 +674,16 @@ static int __init lart_flash_init (void)
681 result,lart_partitions[result].offset, 674 result,lart_partitions[result].offset,
682 result,lart_partitions[result].size,lart_partitions[result].size / 1024); 675 result,lart_partitions[result].size,lart_partitions[result].size / 1024);
683#endif 676#endif
684#endif
685 677
686#ifndef HAVE_PARTITIONS
687 result = mtd_device_register(&mtd, NULL, 0);
688#else
689 result = mtd_device_register(&mtd, lart_partitions, 678 result = mtd_device_register(&mtd, lart_partitions,
690 ARRAY_SIZE(lart_partitions)); 679 ARRAY_SIZE(lart_partitions));
691#endif
692 680
693 return (result); 681 return (result);
694} 682}
695 683
696static void __exit lart_flash_exit (void) 684static void __exit lart_flash_exit (void)
697{ 685{
698#ifndef HAVE_PARTITIONS
699 mtd_device_unregister(&mtd);
700#else
701 mtd_device_unregister(&mtd); 686 mtd_device_unregister(&mtd);
702#endif
703} 687}
704 688
705module_init (lart_flash_init); 689module_init (lart_flash_init);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 35180e475c4c..884904d3f9d2 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -30,6 +30,7 @@
30#include <linux/mtd/cfi.h> 30#include <linux/mtd/cfi.h>
31#include <linux/mtd/mtd.h> 31#include <linux/mtd/mtd.h>
32#include <linux/mtd/partitions.h> 32#include <linux/mtd/partitions.h>
33#include <linux/of_platform.h>
33 34
34#include <linux/spi/spi.h> 35#include <linux/spi/spi.h>
35#include <linux/spi/flash.h> 36#include <linux/spi/flash.h>
@@ -88,7 +89,6 @@ struct m25p {
88 struct spi_device *spi; 89 struct spi_device *spi;
89 struct mutex lock; 90 struct mutex lock;
90 struct mtd_info mtd; 91 struct mtd_info mtd;
91 unsigned partitioned:1;
92 u16 page_size; 92 u16 page_size;
93 u16 addr_width; 93 u16 addr_width;
94 u8 erase_opcode; 94 u8 erase_opcode;
@@ -209,9 +209,8 @@ static int wait_till_ready(struct m25p *flash)
209 */ 209 */
210static int erase_chip(struct m25p *flash) 210static int erase_chip(struct m25p *flash)
211{ 211{
212 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %lldKiB\n", 212 pr_debug("%s: %s %lldKiB\n", dev_name(&flash->spi->dev), __func__,
213 dev_name(&flash->spi->dev), __func__, 213 (long long)(flash->mtd.size >> 10));
214 (long long)(flash->mtd.size >> 10));
215 214
216 /* Wait until finished previous write command. */ 215 /* Wait until finished previous write command. */
217 if (wait_till_ready(flash)) 216 if (wait_till_ready(flash))
@@ -250,9 +249,8 @@ static int m25p_cmdsz(struct m25p *flash)
250 */ 249 */
251static int erase_sector(struct m25p *flash, u32 offset) 250static int erase_sector(struct m25p *flash, u32 offset)
252{ 251{
253 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n", 252 pr_debug("%s: %s %dKiB at 0x%08x\n", dev_name(&flash->spi->dev),
254 dev_name(&flash->spi->dev), __func__, 253 __func__, flash->mtd.erasesize / 1024, offset);
255 flash->mtd.erasesize / 1024, offset);
256 254
257 /* Wait until finished previous write command. */ 255 /* Wait until finished previous write command. */
258 if (wait_till_ready(flash)) 256 if (wait_till_ready(flash))
@@ -286,9 +284,9 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
286 u32 addr,len; 284 u32 addr,len;
287 uint32_t rem; 285 uint32_t rem;
288 286
289 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%llx, len %lld\n", 287 pr_debug("%s: %s at 0x%llx, len %lld\n", dev_name(&flash->spi->dev),
290 dev_name(&flash->spi->dev), __func__, "at", 288 __func__, (long long)instr->addr,
291 (long long)instr->addr, (long long)instr->len); 289 (long long)instr->len);
292 290
293 /* sanity checks */ 291 /* sanity checks */
294 if (instr->addr + instr->len > flash->mtd.size) 292 if (instr->addr + instr->len > flash->mtd.size)
@@ -348,9 +346,8 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
348 struct spi_transfer t[2]; 346 struct spi_transfer t[2];
349 struct spi_message m; 347 struct spi_message m;
350 348
351 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", 349 pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
352 dev_name(&flash->spi->dev), __func__, "from", 350 __func__, (u32)from, len);
353 (u32)from, len);
354 351
355 /* sanity checks */ 352 /* sanity checks */
356 if (!len) 353 if (!len)
@@ -417,9 +414,8 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
417 struct spi_transfer t[2]; 414 struct spi_transfer t[2];
418 struct spi_message m; 415 struct spi_message m;
419 416
420 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", 417 pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
421 dev_name(&flash->spi->dev), __func__, "to", 418 __func__, (u32)to, len);
422 (u32)to, len);
423 419
424 *retlen = 0; 420 *retlen = 0;
425 421
@@ -510,9 +506,8 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
510 size_t actual; 506 size_t actual;
511 int cmd_sz, ret; 507 int cmd_sz, ret;
512 508
513 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", 509 pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
514 dev_name(&flash->spi->dev), __func__, "to", 510 __func__, (u32)to, len);
515 (u32)to, len);
516 511
517 *retlen = 0; 512 *retlen = 0;
518 513
@@ -661,6 +656,7 @@ static const struct spi_device_id m25p_ids[] = {
661 { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) }, 656 { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
662 657
663 { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) }, 658 { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
659 { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
664 { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) }, 660 { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
665 661
666 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) }, 662 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
@@ -671,6 +667,7 @@ static const struct spi_device_id m25p_ids[] = {
671 /* EON -- en25xxx */ 667 /* EON -- en25xxx */
672 { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, 668 { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
673 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, 669 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
670 { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
674 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, 671 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
675 672
676 /* Intel/Numonyx -- xxxs33b */ 673 /* Intel/Numonyx -- xxxs33b */
@@ -788,8 +785,8 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
788 */ 785 */
789 tmp = spi_write_then_read(spi, &code, 1, id, 5); 786 tmp = spi_write_then_read(spi, &code, 1, id, 5);
790 if (tmp < 0) { 787 if (tmp < 0) {
791 DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n", 788 pr_debug("%s: error %d reading JEDEC ID\n",
792 dev_name(&spi->dev), tmp); 789 dev_name(&spi->dev), tmp);
793 return ERR_PTR(tmp); 790 return ERR_PTR(tmp);
794 } 791 }
795 jedec = id[0]; 792 jedec = id[0];
@@ -825,8 +822,12 @@ static int __devinit m25p_probe(struct spi_device *spi)
825 struct m25p *flash; 822 struct m25p *flash;
826 struct flash_info *info; 823 struct flash_info *info;
827 unsigned i; 824 unsigned i;
828 struct mtd_partition *parts = NULL; 825 struct mtd_part_parser_data ppdata;
829 int nr_parts = 0; 826
827#ifdef CONFIG_MTD_OF_PARTS
828 if (!of_device_is_available(spi->dev.of_node))
829 return -ENODEV;
830#endif
830 831
831 /* Platform data helps sort out which chip type we have, as 832 /* Platform data helps sort out which chip type we have, as
832 * well as how this board partitions it. If we don't have 833 * well as how this board partitions it. If we don't have
@@ -928,6 +929,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
928 if (info->flags & M25P_NO_ERASE) 929 if (info->flags & M25P_NO_ERASE)
929 flash->mtd.flags |= MTD_NO_ERASE; 930 flash->mtd.flags |= MTD_NO_ERASE;
930 931
932 ppdata.of_node = spi->dev.of_node;
931 flash->mtd.dev.parent = &spi->dev; 933 flash->mtd.dev.parent = &spi->dev;
932 flash->page_size = info->page_size; 934 flash->page_size = info->page_size;
933 935
@@ -945,8 +947,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
945 dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name, 947 dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
946 (long long)flash->mtd.size >> 10); 948 (long long)flash->mtd.size >> 10);
947 949
948 DEBUG(MTD_DEBUG_LEVEL2, 950 pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) "
949 "mtd .name = %s, .size = 0x%llx (%lldMiB) "
950 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", 951 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
951 flash->mtd.name, 952 flash->mtd.name,
952 (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), 953 (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
@@ -955,8 +956,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
955 956
956 if (flash->mtd.numeraseregions) 957 if (flash->mtd.numeraseregions)
957 for (i = 0; i < flash->mtd.numeraseregions; i++) 958 for (i = 0; i < flash->mtd.numeraseregions; i++)
958 DEBUG(MTD_DEBUG_LEVEL2, 959 pr_debug("mtd.eraseregions[%d] = { .offset = 0x%llx, "
959 "mtd.eraseregions[%d] = { .offset = 0x%llx, "
960 ".erasesize = 0x%.8x (%uKiB), " 960 ".erasesize = 0x%.8x (%uKiB), "
961 ".numblocks = %d }\n", 961 ".numblocks = %d }\n",
962 i, (long long)flash->mtd.eraseregions[i].offset, 962 i, (long long)flash->mtd.eraseregions[i].offset,
@@ -968,41 +968,9 @@ static int __devinit m25p_probe(struct spi_device *spi)
968 /* partitions should match sector boundaries; and it may be good to 968 /* partitions should match sector boundaries; and it may be good to
969 * use readonly partitions for writeprotected sectors (BP2..BP0). 969 * use readonly partitions for writeprotected sectors (BP2..BP0).
970 */ 970 */
971 if (mtd_has_cmdlinepart()) { 971 return mtd_device_parse_register(&flash->mtd, NULL, &ppdata,
972 static const char *part_probes[] 972 data ? data->parts : NULL,
973 = { "cmdlinepart", NULL, }; 973 data ? data->nr_parts : 0);
974
975 nr_parts = parse_mtd_partitions(&flash->mtd,
976 part_probes, &parts, 0);
977 }
978
979 if (nr_parts <= 0 && data && data->parts) {
980 parts = data->parts;
981 nr_parts = data->nr_parts;
982 }
983
984#ifdef CONFIG_MTD_OF_PARTS
985 if (nr_parts <= 0 && spi->dev.of_node) {
986 nr_parts = of_mtd_parse_partitions(&spi->dev,
987 spi->dev.of_node, &parts);
988 }
989#endif
990
991 if (nr_parts > 0) {
992 for (i = 0; i < nr_parts; i++) {
993 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
994 "{.name = %s, .offset = 0x%llx, "
995 ".size = 0x%llx (%lldKiB) }\n",
996 i, parts[i].name,
997 (long long)parts[i].offset,
998 (long long)parts[i].size,
999 (long long)(parts[i].size >> 10));
1000 }
1001 flash->partitioned = 1;
1002 }
1003
1004 return mtd_device_register(&flash->mtd, parts, nr_parts) == 1 ?
1005 -ENODEV : 0;
1006} 974}
1007 975
1008 976
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 13749d458a31..d75c7af18a63 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -17,6 +17,8 @@
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/math64.h> 19#include <linux/math64.h>
20#include <linux/of.h>
21#include <linux/of_device.h>
20 22
21#include <linux/spi/spi.h> 23#include <linux/spi/spi.h>
22#include <linux/spi/flash.h> 24#include <linux/spi/flash.h>
@@ -24,7 +26,6 @@
24#include <linux/mtd/mtd.h> 26#include <linux/mtd/mtd.h>
25#include <linux/mtd/partitions.h> 27#include <linux/mtd/partitions.h>
26 28
27
28/* 29/*
29 * DataFlash is a kind of SPI flash. Most AT45 chips have two buffers in 30 * DataFlash is a kind of SPI flash. Most AT45 chips have two buffers in
30 * each chip, which may be used for double buffered I/O; but this driver 31 * each chip, which may be used for double buffered I/O; but this driver
@@ -98,6 +99,16 @@ struct dataflash {
98 struct mtd_info mtd; 99 struct mtd_info mtd;
99}; 100};
100 101
102#ifdef CONFIG_OF
103static const struct of_device_id dataflash_dt_ids[] = {
104 { .compatible = "atmel,at45", },
105 { .compatible = "atmel,dataflash", },
106 { /* sentinel */ }
107};
108#else
109#define dataflash_dt_ids NULL
110#endif
111
101/* ......................................................................... */ 112/* ......................................................................... */
102 113
103/* 114/*
@@ -122,7 +133,7 @@ static int dataflash_waitready(struct spi_device *spi)
122 for (;;) { 133 for (;;) {
123 status = dataflash_status(spi); 134 status = dataflash_status(spi);
124 if (status < 0) { 135 if (status < 0) {
125 DEBUG(MTD_DEBUG_LEVEL1, "%s: status %d?\n", 136 pr_debug("%s: status %d?\n",
126 dev_name(&spi->dev), status); 137 dev_name(&spi->dev), status);
127 status = 0; 138 status = 0;
128 } 139 }
@@ -149,7 +160,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
149 uint8_t *command; 160 uint8_t *command;
150 uint32_t rem; 161 uint32_t rem;
151 162
152 DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%llx len 0x%llx\n", 163 pr_debug("%s: erase addr=0x%llx len 0x%llx\n",
153 dev_name(&spi->dev), (long long)instr->addr, 164 dev_name(&spi->dev), (long long)instr->addr,
154 (long long)instr->len); 165 (long long)instr->len);
155 166
@@ -187,7 +198,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
187 command[2] = (uint8_t)(pageaddr >> 8); 198 command[2] = (uint8_t)(pageaddr >> 8);
188 command[3] = 0; 199 command[3] = 0;
189 200
190 DEBUG(MTD_DEBUG_LEVEL3, "ERASE %s: (%x) %x %x %x [%i]\n", 201 pr_debug("ERASE %s: (%x) %x %x %x [%i]\n",
191 do_block ? "block" : "page", 202 do_block ? "block" : "page",
192 command[0], command[1], command[2], command[3], 203 command[0], command[1], command[2], command[3],
193 pageaddr); 204 pageaddr);
@@ -238,8 +249,8 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
238 uint8_t *command; 249 uint8_t *command;
239 int status; 250 int status;
240 251
241 DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n", 252 pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev),
242 dev_name(&priv->spi->dev), (unsigned)from, (unsigned)(from + len)); 253 (unsigned)from, (unsigned)(from + len));
243 254
244 *retlen = 0; 255 *retlen = 0;
245 256
@@ -255,7 +266,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
255 266
256 command = priv->command; 267 command = priv->command;
257 268
258 DEBUG(MTD_DEBUG_LEVEL3, "READ: (%x) %x %x %x\n", 269 pr_debug("READ: (%x) %x %x %x\n",
259 command[0], command[1], command[2], command[3]); 270 command[0], command[1], command[2], command[3]);
260 271
261 spi_message_init(&msg); 272 spi_message_init(&msg);
@@ -287,7 +298,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
287 *retlen = msg.actual_length - 8; 298 *retlen = msg.actual_length - 8;
288 status = 0; 299 status = 0;
289 } else 300 } else
290 DEBUG(MTD_DEBUG_LEVEL1, "%s: read %x..%x --> %d\n", 301 pr_debug("%s: read %x..%x --> %d\n",
291 dev_name(&priv->spi->dev), 302 dev_name(&priv->spi->dev),
292 (unsigned)from, (unsigned)(from + len), 303 (unsigned)from, (unsigned)(from + len),
293 status); 304 status);
@@ -314,7 +325,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
314 int status = -EINVAL; 325 int status = -EINVAL;
315 uint8_t *command; 326 uint8_t *command;
316 327
317 DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n", 328 pr_debug("%s: write 0x%x..0x%x\n",
318 dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len)); 329 dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
319 330
320 *retlen = 0; 331 *retlen = 0;
@@ -340,7 +351,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
340 351
341 mutex_lock(&priv->lock); 352 mutex_lock(&priv->lock);
342 while (remaining > 0) { 353 while (remaining > 0) {
343 DEBUG(MTD_DEBUG_LEVEL3, "write @ %i:%i len=%i\n", 354 pr_debug("write @ %i:%i len=%i\n",
344 pageaddr, offset, writelen); 355 pageaddr, offset, writelen);
345 356
346 /* REVISIT: 357 /* REVISIT:
@@ -368,12 +379,12 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
368 command[2] = (addr & 0x0000FF00) >> 8; 379 command[2] = (addr & 0x0000FF00) >> 8;
369 command[3] = 0; 380 command[3] = 0;
370 381
371 DEBUG(MTD_DEBUG_LEVEL3, "TRANSFER: (%x) %x %x %x\n", 382 pr_debug("TRANSFER: (%x) %x %x %x\n",
372 command[0], command[1], command[2], command[3]); 383 command[0], command[1], command[2], command[3]);
373 384
374 status = spi_sync(spi, &msg); 385 status = spi_sync(spi, &msg);
375 if (status < 0) 386 if (status < 0)
376 DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n", 387 pr_debug("%s: xfer %u -> %d\n",
377 dev_name(&spi->dev), addr, status); 388 dev_name(&spi->dev), addr, status);
378 389
379 (void) dataflash_waitready(priv->spi); 390 (void) dataflash_waitready(priv->spi);
@@ -386,7 +397,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
386 command[2] = (addr & 0x0000FF00) >> 8; 397 command[2] = (addr & 0x0000FF00) >> 8;
387 command[3] = (addr & 0x000000FF); 398 command[3] = (addr & 0x000000FF);
388 399
389 DEBUG(MTD_DEBUG_LEVEL3, "PROGRAM: (%x) %x %x %x\n", 400 pr_debug("PROGRAM: (%x) %x %x %x\n",
390 command[0], command[1], command[2], command[3]); 401 command[0], command[1], command[2], command[3]);
391 402
392 x[1].tx_buf = writebuf; 403 x[1].tx_buf = writebuf;
@@ -395,7 +406,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
395 status = spi_sync(spi, &msg); 406 status = spi_sync(spi, &msg);
396 spi_transfer_del(x + 1); 407 spi_transfer_del(x + 1);
397 if (status < 0) 408 if (status < 0)
398 DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n", 409 pr_debug("%s: pgm %u/%u -> %d\n",
399 dev_name(&spi->dev), addr, writelen, status); 410 dev_name(&spi->dev), addr, writelen, status);
400 411
401 (void) dataflash_waitready(priv->spi); 412 (void) dataflash_waitready(priv->spi);
@@ -410,12 +421,12 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
410 command[2] = (addr & 0x0000FF00) >> 8; 421 command[2] = (addr & 0x0000FF00) >> 8;
411 command[3] = 0; 422 command[3] = 0;
412 423
413 DEBUG(MTD_DEBUG_LEVEL3, "COMPARE: (%x) %x %x %x\n", 424 pr_debug("COMPARE: (%x) %x %x %x\n",
414 command[0], command[1], command[2], command[3]); 425 command[0], command[1], command[2], command[3]);
415 426
416 status = spi_sync(spi, &msg); 427 status = spi_sync(spi, &msg);
417 if (status < 0) 428 if (status < 0)
418 DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n", 429 pr_debug("%s: compare %u -> %d\n",
419 dev_name(&spi->dev), addr, status); 430 dev_name(&spi->dev), addr, status);
420 431
421 status = dataflash_waitready(priv->spi); 432 status = dataflash_waitready(priv->spi);
@@ -634,11 +645,10 @@ add_dataflash_otp(struct spi_device *spi, char *name,
634{ 645{
635 struct dataflash *priv; 646 struct dataflash *priv;
636 struct mtd_info *device; 647 struct mtd_info *device;
648 struct mtd_part_parser_data ppdata;
637 struct flash_platform_data *pdata = spi->dev.platform_data; 649 struct flash_platform_data *pdata = spi->dev.platform_data;
638 char *otp_tag = ""; 650 char *otp_tag = "";
639 int err = 0; 651 int err = 0;
640 struct mtd_partition *parts;
641 int nr_parts = 0;
642 652
643 priv = kzalloc(sizeof *priv, GFP_KERNEL); 653 priv = kzalloc(sizeof *priv, GFP_KERNEL);
644 if (!priv) 654 if (!priv)
@@ -677,28 +687,11 @@ add_dataflash_otp(struct spi_device *spi, char *name,
677 pagesize, otp_tag); 687 pagesize, otp_tag);
678 dev_set_drvdata(&spi->dev, priv); 688 dev_set_drvdata(&spi->dev, priv);
679 689
680 if (mtd_has_cmdlinepart()) { 690 ppdata.of_node = spi->dev.of_node;
681 static const char *part_probes[] = { "cmdlinepart", NULL, }; 691 err = mtd_device_parse_register(device, NULL, &ppdata,
682 692 pdata ? pdata->parts : NULL,
683 nr_parts = parse_mtd_partitions(device, part_probes, &parts, 693 pdata ? pdata->nr_parts : 0);
684 0);
685 }
686 694
687 if (nr_parts <= 0 && pdata && pdata->parts) {
688 parts = pdata->parts;
689 nr_parts = pdata->nr_parts;
690 }
691
692 if (nr_parts > 0) {
693 priv->partitioned = 1;
694 err = mtd_device_register(device, parts, nr_parts);
695 goto out;
696 }
697
698 if (mtd_device_register(device, NULL, 0) == 1)
699 err = -ENODEV;
700
701out:
702 if (!err) 695 if (!err)
703 return 0; 696 return 0;
704 697
@@ -787,7 +780,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
787 */ 780 */
788 tmp = spi_write_then_read(spi, &code, 1, id, 3); 781 tmp = spi_write_then_read(spi, &code, 1, id, 3);
789 if (tmp < 0) { 782 if (tmp < 0) {
790 DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n", 783 pr_debug("%s: error %d reading JEDEC ID\n",
791 dev_name(&spi->dev), tmp); 784 dev_name(&spi->dev), tmp);
792 return ERR_PTR(tmp); 785 return ERR_PTR(tmp);
793 } 786 }
@@ -804,7 +797,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
804 tmp < ARRAY_SIZE(dataflash_data); 797 tmp < ARRAY_SIZE(dataflash_data);
805 tmp++, info++) { 798 tmp++, info++) {
806 if (info->jedec_id == jedec) { 799 if (info->jedec_id == jedec) {
807 DEBUG(MTD_DEBUG_LEVEL1, "%s: OTP, sector protect%s\n", 800 pr_debug("%s: OTP, sector protect%s\n",
808 dev_name(&spi->dev), 801 dev_name(&spi->dev),
809 (info->flags & SUP_POW2PS) 802 (info->flags & SUP_POW2PS)
810 ? ", binary pagesize" : "" 803 ? ", binary pagesize" : ""
@@ -812,8 +805,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
812 if (info->flags & SUP_POW2PS) { 805 if (info->flags & SUP_POW2PS) {
813 status = dataflash_status(spi); 806 status = dataflash_status(spi);
814 if (status < 0) { 807 if (status < 0) {
815 DEBUG(MTD_DEBUG_LEVEL1, 808 pr_debug("%s: status error %d\n",
816 "%s: status error %d\n",
817 dev_name(&spi->dev), status); 809 dev_name(&spi->dev), status);
818 return ERR_PTR(status); 810 return ERR_PTR(status);
819 } 811 }
@@ -878,7 +870,7 @@ static int __devinit dataflash_probe(struct spi_device *spi)
878 */ 870 */
879 status = dataflash_status(spi); 871 status = dataflash_status(spi);
880 if (status <= 0 || status == 0xff) { 872 if (status <= 0 || status == 0xff) {
881 DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n", 873 pr_debug("%s: status error %d\n",
882 dev_name(&spi->dev), status); 874 dev_name(&spi->dev), status);
883 if (status == 0 || status == 0xff) 875 if (status == 0 || status == 0xff)
884 status = -ENODEV; 876 status = -ENODEV;
@@ -914,14 +906,14 @@ static int __devinit dataflash_probe(struct spi_device *spi)
914 break; 906 break;
915 /* obsolete AT45DB1282 not (yet?) supported */ 907 /* obsolete AT45DB1282 not (yet?) supported */
916 default: 908 default:
917 DEBUG(MTD_DEBUG_LEVEL1, "%s: unsupported device (%x)\n", 909 pr_debug("%s: unsupported device (%x)\n", dev_name(&spi->dev),
918 dev_name(&spi->dev), status & 0x3c); 910 status & 0x3c);
919 status = -ENODEV; 911 status = -ENODEV;
920 } 912 }
921 913
922 if (status < 0) 914 if (status < 0)
923 DEBUG(MTD_DEBUG_LEVEL1, "%s: add_dataflash --> %d\n", 915 pr_debug("%s: add_dataflash --> %d\n", dev_name(&spi->dev),
924 dev_name(&spi->dev), status); 916 status);
925 917
926 return status; 918 return status;
927} 919}
@@ -931,7 +923,7 @@ static int __devexit dataflash_remove(struct spi_device *spi)
931 struct dataflash *flash = dev_get_drvdata(&spi->dev); 923 struct dataflash *flash = dev_get_drvdata(&spi->dev);
932 int status; 924 int status;
933 925
934 DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev)); 926 pr_debug("%s: remove\n", dev_name(&spi->dev));
935 927
936 status = mtd_device_unregister(&flash->mtd); 928 status = mtd_device_unregister(&flash->mtd);
937 if (status == 0) { 929 if (status == 0) {
@@ -946,6 +938,7 @@ static struct spi_driver dataflash_driver = {
946 .name = "mtd_dataflash", 938 .name = "mtd_dataflash",
947 .bus = &spi_bus_type, 939 .bus = &spi_bus_type,
948 .owner = THIS_MODULE, 940 .owner = THIS_MODULE,
941 .of_match_table = dataflash_dt_ids,
949 }, 942 },
950 943
951 .probe = dataflash_probe, 944 .probe = dataflash_probe,
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 83e80c65d6e7..d38ef3bffe8d 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -52,8 +52,6 @@ struct sst25l_flash {
52 struct spi_device *spi; 52 struct spi_device *spi;
53 struct mutex lock; 53 struct mutex lock;
54 struct mtd_info mtd; 54 struct mtd_info mtd;
55
56 int partitioned;
57}; 55};
58 56
59struct flash_info { 57struct flash_info {
@@ -381,8 +379,6 @@ static int __devinit sst25l_probe(struct spi_device *spi)
381 struct sst25l_flash *flash; 379 struct sst25l_flash *flash;
382 struct flash_platform_data *data; 380 struct flash_platform_data *data;
383 int ret, i; 381 int ret, i;
384 struct mtd_partition *parts = NULL;
385 int nr_parts = 0;
386 382
387 flash_info = sst25l_match_device(spi); 383 flash_info = sst25l_match_device(spi);
388 if (!flash_info) 384 if (!flash_info)
@@ -414,8 +410,7 @@ static int __devinit sst25l_probe(struct spi_device *spi)
414 dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name, 410 dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
415 (long long)flash->mtd.size >> 10); 411 (long long)flash->mtd.size >> 10);
416 412
417 DEBUG(MTD_DEBUG_LEVEL2, 413 pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) "
418 "mtd .name = %s, .size = 0x%llx (%lldMiB) "
419 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", 414 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
420 flash->mtd.name, 415 flash->mtd.name,
421 (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), 416 (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
@@ -423,37 +418,10 @@ static int __devinit sst25l_probe(struct spi_device *spi)
423 flash->mtd.numeraseregions); 418 flash->mtd.numeraseregions);
424 419
425 420
426 if (mtd_has_cmdlinepart()) { 421 ret = mtd_device_parse_register(&flash->mtd, NULL, 0,
427 static const char *part_probes[] = {"cmdlinepart", NULL}; 422 data ? data->parts : NULL,
428 423 data ? data->nr_parts : 0);
429 nr_parts = parse_mtd_partitions(&flash->mtd, 424 if (ret) {
430 part_probes,
431 &parts, 0);
432 }
433
434 if (nr_parts <= 0 && data && data->parts) {
435 parts = data->parts;
436 nr_parts = data->nr_parts;
437 }
438
439 if (nr_parts > 0) {
440 for (i = 0; i < nr_parts; i++) {
441 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
442 "{.name = %s, .offset = 0x%llx, "
443 ".size = 0x%llx (%lldKiB) }\n",
444 i, parts[i].name,
445 (long long)parts[i].offset,
446 (long long)parts[i].size,
447 (long long)(parts[i].size >> 10));
448 }
449
450 flash->partitioned = 1;
451 return mtd_device_register(&flash->mtd, parts,
452 nr_parts);
453 }
454
455 ret = mtd_device_register(&flash->mtd, NULL, 0);
456 if (ret == 1) {
457 kfree(flash); 425 kfree(flash);
458 dev_set_drvdata(&spi->dev, NULL); 426 dev_set_drvdata(&spi->dev, NULL);
459 return -ENODEV; 427 return -ENODEV;
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 037b399df3f1..c7382bb686c6 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -339,7 +339,7 @@ static int erase_xfer(partition_t *part,
339 struct erase_info *erase; 339 struct erase_info *erase;
340 340
341 xfer = &part->XferInfo[xfernum]; 341 xfer = &part->XferInfo[xfernum];
342 DEBUG(1, "ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset); 342 pr_debug("ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
343 xfer->state = XFER_ERASING; 343 xfer->state = XFER_ERASING;
344 344
345 /* Is there a free erase slot? Always in MTD. */ 345 /* Is there a free erase slot? Always in MTD. */
@@ -415,7 +415,7 @@ static int prepare_xfer(partition_t *part, int i)
415 xfer = &part->XferInfo[i]; 415 xfer = &part->XferInfo[i];
416 xfer->state = XFER_FAILED; 416 xfer->state = XFER_FAILED;
417 417
418 DEBUG(1, "ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset); 418 pr_debug("ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);
419 419
420 /* Write the transfer unit header */ 420 /* Write the transfer unit header */
421 header = part->header; 421 header = part->header;
@@ -476,7 +476,7 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
476 476
477 eun = &part->EUNInfo[srcunit]; 477 eun = &part->EUNInfo[srcunit];
478 xfer = &part->XferInfo[xferunit]; 478 xfer = &part->XferInfo[xferunit];
479 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n", 479 pr_debug("ftl_cs: copying block 0x%x to 0x%x\n",
480 eun->Offset, xfer->Offset); 480 eun->Offset, xfer->Offset);
481 481
482 482
@@ -598,7 +598,7 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
598 unit with the fewest erases, and usually pick the data unit with 598 unit with the fewest erases, and usually pick the data unit with
599 the most deleted blocks. But with a small probability, pick the 599 the most deleted blocks. But with a small probability, pick the
600 oldest data unit instead. This means that we generally postpone 600 oldest data unit instead. This means that we generally postpone
601 the next reclaimation as long as possible, but shuffle static 601 the next reclamation as long as possible, but shuffle static
602 stuff around a bit for wear leveling. 602 stuff around a bit for wear leveling.
603 603
604======================================================================*/ 604======================================================================*/
@@ -609,8 +609,8 @@ static int reclaim_block(partition_t *part)
609 uint32_t best; 609 uint32_t best;
610 int queued, ret; 610 int queued, ret;
611 611
612 DEBUG(0, "ftl_cs: reclaiming space...\n"); 612 pr_debug("ftl_cs: reclaiming space...\n");
613 DEBUG(3, "NumTransferUnits == %x\n", part->header.NumTransferUnits); 613 pr_debug("NumTransferUnits == %x\n", part->header.NumTransferUnits);
614 /* Pick the least erased transfer unit */ 614 /* Pick the least erased transfer unit */
615 best = 0xffffffff; xfer = 0xffff; 615 best = 0xffffffff; xfer = 0xffff;
616 do { 616 do {
@@ -618,22 +618,22 @@ static int reclaim_block(partition_t *part)
618 for (i = 0; i < part->header.NumTransferUnits; i++) { 618 for (i = 0; i < part->header.NumTransferUnits; i++) {
619 int n=0; 619 int n=0;
620 if (part->XferInfo[i].state == XFER_UNKNOWN) { 620 if (part->XferInfo[i].state == XFER_UNKNOWN) {
621 DEBUG(3,"XferInfo[%d].state == XFER_UNKNOWN\n",i); 621 pr_debug("XferInfo[%d].state == XFER_UNKNOWN\n",i);
622 n=1; 622 n=1;
623 erase_xfer(part, i); 623 erase_xfer(part, i);
624 } 624 }
625 if (part->XferInfo[i].state == XFER_ERASING) { 625 if (part->XferInfo[i].state == XFER_ERASING) {
626 DEBUG(3,"XferInfo[%d].state == XFER_ERASING\n",i); 626 pr_debug("XferInfo[%d].state == XFER_ERASING\n",i);
627 n=1; 627 n=1;
628 queued = 1; 628 queued = 1;
629 } 629 }
630 else if (part->XferInfo[i].state == XFER_ERASED) { 630 else if (part->XferInfo[i].state == XFER_ERASED) {
631 DEBUG(3,"XferInfo[%d].state == XFER_ERASED\n",i); 631 pr_debug("XferInfo[%d].state == XFER_ERASED\n",i);
632 n=1; 632 n=1;
633 prepare_xfer(part, i); 633 prepare_xfer(part, i);
634 } 634 }
635 if (part->XferInfo[i].state == XFER_PREPARED) { 635 if (part->XferInfo[i].state == XFER_PREPARED) {
636 DEBUG(3,"XferInfo[%d].state == XFER_PREPARED\n",i); 636 pr_debug("XferInfo[%d].state == XFER_PREPARED\n",i);
637 n=1; 637 n=1;
638 if (part->XferInfo[i].EraseCount <= best) { 638 if (part->XferInfo[i].EraseCount <= best) {
639 best = part->XferInfo[i].EraseCount; 639 best = part->XferInfo[i].EraseCount;
@@ -641,12 +641,12 @@ static int reclaim_block(partition_t *part)
641 } 641 }
642 } 642 }
643 if (!n) 643 if (!n)
644 DEBUG(3,"XferInfo[%d].state == %x\n",i, part->XferInfo[i].state); 644 pr_debug("XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);
645 645
646 } 646 }
647 if (xfer == 0xffff) { 647 if (xfer == 0xffff) {
648 if (queued) { 648 if (queued) {
649 DEBUG(1, "ftl_cs: waiting for transfer " 649 pr_debug("ftl_cs: waiting for transfer "
650 "unit to be prepared...\n"); 650 "unit to be prepared...\n");
651 if (part->mbd.mtd->sync) 651 if (part->mbd.mtd->sync)
652 part->mbd.mtd->sync(part->mbd.mtd); 652 part->mbd.mtd->sync(part->mbd.mtd);
@@ -656,7 +656,7 @@ static int reclaim_block(partition_t *part)
656 printk(KERN_NOTICE "ftl_cs: reclaim failed: no " 656 printk(KERN_NOTICE "ftl_cs: reclaim failed: no "
657 "suitable transfer units!\n"); 657 "suitable transfer units!\n");
658 else 658 else
659 DEBUG(1, "ftl_cs: reclaim failed: no " 659 pr_debug("ftl_cs: reclaim failed: no "
660 "suitable transfer units!\n"); 660 "suitable transfer units!\n");
661 661
662 return -EIO; 662 return -EIO;
@@ -666,7 +666,7 @@ static int reclaim_block(partition_t *part)
666 666
667 eun = 0; 667 eun = 0;
668 if ((jiffies % shuffle_freq) == 0) { 668 if ((jiffies % shuffle_freq) == 0) {
669 DEBUG(1, "ftl_cs: recycling freshest block...\n"); 669 pr_debug("ftl_cs: recycling freshest block...\n");
670 best = 0xffffffff; 670 best = 0xffffffff;
671 for (i = 0; i < part->DataUnits; i++) 671 for (i = 0; i < part->DataUnits; i++)
672 if (part->EUNInfo[i].EraseCount <= best) { 672 if (part->EUNInfo[i].EraseCount <= best) {
@@ -686,7 +686,7 @@ static int reclaim_block(partition_t *part)
686 printk(KERN_NOTICE "ftl_cs: reclaim failed: " 686 printk(KERN_NOTICE "ftl_cs: reclaim failed: "
687 "no free blocks!\n"); 687 "no free blocks!\n");
688 else 688 else
689 DEBUG(1,"ftl_cs: reclaim failed: " 689 pr_debug("ftl_cs: reclaim failed: "
690 "no free blocks!\n"); 690 "no free blocks!\n");
691 691
692 return -EIO; 692 return -EIO;
@@ -771,7 +771,7 @@ static uint32_t find_free(partition_t *part)
771 printk(KERN_NOTICE "ftl_cs: bad free list!\n"); 771 printk(KERN_NOTICE "ftl_cs: bad free list!\n");
772 return 0; 772 return 0;
773 } 773 }
774 DEBUG(2, "ftl_cs: found free block at %d in %d\n", blk, eun); 774 pr_debug("ftl_cs: found free block at %d in %d\n", blk, eun);
775 return blk; 775 return blk;
776 776
777} /* find_free */ 777} /* find_free */
@@ -791,7 +791,7 @@ static int ftl_read(partition_t *part, caddr_t buffer,
791 int ret; 791 int ret;
792 size_t offset, retlen; 792 size_t offset, retlen;
793 793
794 DEBUG(2, "ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n", 794 pr_debug("ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
795 part, sector, nblocks); 795 part, sector, nblocks);
796 if (!(part->state & FTL_FORMATTED)) { 796 if (!(part->state & FTL_FORMATTED)) {
797 printk(KERN_NOTICE "ftl_cs: bad partition\n"); 797 printk(KERN_NOTICE "ftl_cs: bad partition\n");
@@ -840,7 +840,7 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
840 int ret; 840 int ret;
841 size_t retlen, offset; 841 size_t retlen, offset;
842 842
843 DEBUG(2, "ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n", 843 pr_debug("ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n",
844 part, log_addr, virt_addr); 844 part, log_addr, virt_addr);
845 bsize = 1 << part->header.EraseUnitSize; 845 bsize = 1 << part->header.EraseUnitSize;
846 eun = log_addr / bsize; 846 eun = log_addr / bsize;
@@ -905,7 +905,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
905 int ret; 905 int ret;
906 size_t retlen, offset; 906 size_t retlen, offset;
907 907
908 DEBUG(2, "ftl_cs: ftl_write(0x%p, %ld, %ld)\n", 908 pr_debug("ftl_cs: ftl_write(0x%p, %ld, %ld)\n",
909 part, sector, nblocks); 909 part, sector, nblocks);
910 if (!(part->state & FTL_FORMATTED)) { 910 if (!(part->state & FTL_FORMATTED)) {
911 printk(KERN_NOTICE "ftl_cs: bad partition\n"); 911 printk(KERN_NOTICE "ftl_cs: bad partition\n");
@@ -1011,7 +1011,7 @@ static int ftl_discardsect(struct mtd_blktrans_dev *dev,
1011 partition_t *part = (void *)dev; 1011 partition_t *part = (void *)dev;
1012 uint32_t bsize = 1 << part->header.EraseUnitSize; 1012 uint32_t bsize = 1 << part->header.EraseUnitSize;
1013 1013
1014 DEBUG(1, "FTL erase sector %ld for %d sectors\n", 1014 pr_debug("FTL erase sector %ld for %d sectors\n",
1015 sector, nr_sects); 1015 sector, nr_sects);
1016 1016
1017 while (nr_sects) { 1017 while (nr_sects) {
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index d7592e67d048..dd034efd1875 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -63,14 +63,12 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
63 return; 63 return;
64 } 64 }
65 65
66 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: add_mtd for %s\n", mtd->name); 66 pr_debug("INFTL: add_mtd for %s\n", mtd->name);
67 67
68 inftl = kzalloc(sizeof(*inftl), GFP_KERNEL); 68 inftl = kzalloc(sizeof(*inftl), GFP_KERNEL);
69 69
70 if (!inftl) { 70 if (!inftl)
71 printk(KERN_WARNING "INFTL: Out of memory for data structures\n");
72 return; 71 return;
73 }
74 72
75 inftl->mbd.mtd = mtd; 73 inftl->mbd.mtd = mtd;
76 inftl->mbd.devnum = -1; 74 inftl->mbd.devnum = -1;
@@ -133,7 +131,7 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
133{ 131{
134 struct INFTLrecord *inftl = (void *)dev; 132 struct INFTLrecord *inftl = (void *)dev;
135 133
136 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: remove_dev (i=%d)\n", dev->devnum); 134 pr_debug("INFTL: remove_dev (i=%d)\n", dev->devnum);
137 135
138 del_mtd_blktrans_dev(dev); 136 del_mtd_blktrans_dev(dev);
139 137
@@ -154,7 +152,7 @@ int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
154 struct mtd_oob_ops ops; 152 struct mtd_oob_ops ops;
155 int res; 153 int res;
156 154
157 ops.mode = MTD_OOB_PLACE; 155 ops.mode = MTD_OPS_PLACE_OOB;
158 ops.ooboffs = offs & (mtd->writesize - 1); 156 ops.ooboffs = offs & (mtd->writesize - 1);
159 ops.ooblen = len; 157 ops.ooblen = len;
160 ops.oobbuf = buf; 158 ops.oobbuf = buf;
@@ -174,7 +172,7 @@ int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
174 struct mtd_oob_ops ops; 172 struct mtd_oob_ops ops;
175 int res; 173 int res;
176 174
177 ops.mode = MTD_OOB_PLACE; 175 ops.mode = MTD_OPS_PLACE_OOB;
178 ops.ooboffs = offs & (mtd->writesize - 1); 176 ops.ooboffs = offs & (mtd->writesize - 1);
179 ops.ooblen = len; 177 ops.ooblen = len;
180 ops.oobbuf = buf; 178 ops.oobbuf = buf;
@@ -194,7 +192,7 @@ static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
194 struct mtd_oob_ops ops; 192 struct mtd_oob_ops ops;
195 int res; 193 int res;
196 194
197 ops.mode = MTD_OOB_PLACE; 195 ops.mode = MTD_OPS_PLACE_OOB;
198 ops.ooboffs = offs; 196 ops.ooboffs = offs;
199 ops.ooblen = mtd->oobsize; 197 ops.ooblen = mtd->oobsize;
200 ops.oobbuf = oob; 198 ops.oobbuf = oob;
@@ -215,16 +213,16 @@ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
215 u16 pot = inftl->LastFreeEUN; 213 u16 pot = inftl->LastFreeEUN;
216 int silly = inftl->nb_blocks; 214 int silly = inftl->nb_blocks;
217 215
218 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=%p," 216 pr_debug("INFTL: INFTL_findfreeblock(inftl=%p,desperate=%d)\n",
219 "desperate=%d)\n", inftl, desperate); 217 inftl, desperate);
220 218
221 /* 219 /*
222 * Normally, we force a fold to happen before we run out of free 220 * Normally, we force a fold to happen before we run out of free
223 * blocks completely. 221 * blocks completely.
224 */ 222 */
225 if (!desperate && inftl->numfreeEUNs < 2) { 223 if (!desperate && inftl->numfreeEUNs < 2) {
226 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free " 224 pr_debug("INFTL: there are too few free EUNs (%d)\n",
227 "EUNs (%d)\n", inftl->numfreeEUNs); 225 inftl->numfreeEUNs);
228 return BLOCK_NIL; 226 return BLOCK_NIL;
229 } 227 }
230 228
@@ -259,8 +257,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
259 struct inftl_oob oob; 257 struct inftl_oob oob;
260 size_t retlen; 258 size_t retlen;
261 259
262 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d," 260 pr_debug("INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,pending=%d)\n",
263 "pending=%d)\n", inftl, thisVUC, pendingblock); 261 inftl, thisVUC, pendingblock);
264 262
265 memset(BlockMap, 0xff, sizeof(BlockMap)); 263 memset(BlockMap, 0xff, sizeof(BlockMap));
266 memset(BlockDeleted, 0, sizeof(BlockDeleted)); 264 memset(BlockDeleted, 0, sizeof(BlockDeleted));
@@ -323,8 +321,7 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
323 * Chain, and the Erase Unit into which we are supposed to be copying. 321 * Chain, and the Erase Unit into which we are supposed to be copying.
324 * Go for it. 322 * Go for it.
325 */ 323 */
326 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: folding chain %d into unit %d\n", 324 pr_debug("INFTL: folding chain %d into unit %d\n", thisVUC, targetEUN);
327 thisVUC, targetEUN);
328 325
329 for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) { 326 for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) {
330 unsigned char movebuf[SECTORSIZE]; 327 unsigned char movebuf[SECTORSIZE];
@@ -349,14 +346,13 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
349 ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) + 346 ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) +
350 (block * SECTORSIZE), SECTORSIZE, &retlen, 347 (block * SECTORSIZE), SECTORSIZE, &retlen,
351 movebuf); 348 movebuf);
352 if (ret < 0 && ret != -EUCLEAN) { 349 if (ret < 0 && !mtd_is_bitflip(ret)) {
353 ret = mtd->read(mtd, 350 ret = mtd->read(mtd,
354 (inftl->EraseSize * BlockMap[block]) + 351 (inftl->EraseSize * BlockMap[block]) +
355 (block * SECTORSIZE), SECTORSIZE, 352 (block * SECTORSIZE), SECTORSIZE,
356 &retlen, movebuf); 353 &retlen, movebuf);
357 if (ret != -EIO) 354 if (ret != -EIO)
358 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went " 355 pr_debug("INFTL: error went away on retry?\n");
359 "away on retry?\n");
360 } 356 }
361 memset(&oob, 0xff, sizeof(struct inftl_oob)); 357 memset(&oob, 0xff, sizeof(struct inftl_oob));
362 oob.b.Status = oob.b.Status1 = SECTOR_USED; 358 oob.b.Status = oob.b.Status1 = SECTOR_USED;
@@ -372,8 +368,7 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
372 * is important, by doing oldest first if we crash/reboot then it 368 * is important, by doing oldest first if we crash/reboot then it
373 * it is relatively simple to clean up the mess). 369 * it is relatively simple to clean up the mess).
374 */ 370 */
375 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: want to erase virtual chain %d\n", 371 pr_debug("INFTL: want to erase virtual chain %d\n", thisVUC);
376 thisVUC);
377 372
378 for (;;) { 373 for (;;) {
379 /* Find oldest unit in chain. */ 374 /* Find oldest unit in chain. */
@@ -421,7 +416,7 @@ static u16 INFTL_makefreeblock(struct INFTLrecord *inftl, unsigned pendingblock)
421 u16 ChainLength = 0, thislen; 416 u16 ChainLength = 0, thislen;
422 u16 chain, EUN; 417 u16 chain, EUN;
423 418
424 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=%p," 419 pr_debug("INFTL: INFTL_makefreeblock(inftl=%p,"
425 "pending=%d)\n", inftl, pendingblock); 420 "pending=%d)\n", inftl, pendingblock);
426 421
427 for (chain = 0; chain < inftl->nb_blocks; chain++) { 422 for (chain = 0; chain < inftl->nb_blocks; chain++) {
@@ -484,8 +479,8 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
484 size_t retlen; 479 size_t retlen;
485 int silly, silly2 = 3; 480 int silly, silly2 = 3;
486 481
487 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=%p," 482 pr_debug("INFTL: INFTL_findwriteunit(inftl=%p,block=%d)\n",
488 "block=%d)\n", inftl, block); 483 inftl, block);
489 484
490 do { 485 do {
491 /* 486 /*
@@ -501,8 +496,8 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
501 blockofs, 8, &retlen, (char *)&bci); 496 blockofs, 8, &retlen, (char *)&bci);
502 497
503 status = bci.Status | bci.Status1; 498 status = bci.Status | bci.Status1;
504 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: status of block %d in " 499 pr_debug("INFTL: status of block %d in EUN %d is %x\n",
505 "EUN %d is %x\n", block , writeEUN, status); 500 block , writeEUN, status);
506 501
507 switch(status) { 502 switch(status) {
508 case SECTOR_FREE: 503 case SECTOR_FREE:
@@ -555,9 +550,9 @@ hitused:
555 * Hopefully we free something, lets try again. 550 * Hopefully we free something, lets try again.
556 * This time we are desperate... 551 * This time we are desperate...
557 */ 552 */
558 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: using desperate==1 " 553 pr_debug("INFTL: using desperate==1 to find free EUN "
559 "to find free EUN to accommodate write to " 554 "to accommodate write to VUC %d\n",
560 "VUC %d\n", thisVUC); 555 thisVUC);
561 writeEUN = INFTL_findfreeblock(inftl, 1); 556 writeEUN = INFTL_findfreeblock(inftl, 1);
562 if (writeEUN == BLOCK_NIL) { 557 if (writeEUN == BLOCK_NIL) {
563 /* 558 /*
@@ -647,7 +642,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
647 struct inftl_bci bci; 642 struct inftl_bci bci;
648 size_t retlen; 643 size_t retlen;
649 644
650 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=%p," 645 pr_debug("INFTL: INFTL_trydeletechain(inftl=%p,"
651 "thisVUC=%d)\n", inftl, thisVUC); 646 "thisVUC=%d)\n", inftl, thisVUC);
652 647
653 memset(BlockUsed, 0, sizeof(BlockUsed)); 648 memset(BlockUsed, 0, sizeof(BlockUsed));
@@ -711,7 +706,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
711 * For each block in the chain free it and make it available 706 * For each block in the chain free it and make it available
712 * for future use. Erase from the oldest unit first. 707 * for future use. Erase from the oldest unit first.
713 */ 708 */
714 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: deleting empty VUC %d\n", thisVUC); 709 pr_debug("INFTL: deleting empty VUC %d\n", thisVUC);
715 710
716 for (;;) { 711 for (;;) {
717 u16 *prevEUN = &inftl->VUtable[thisVUC]; 712 u16 *prevEUN = &inftl->VUtable[thisVUC];
@@ -719,7 +714,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
719 714
720 /* If the chain is all gone already, we're done */ 715 /* If the chain is all gone already, we're done */
721 if (thisEUN == BLOCK_NIL) { 716 if (thisEUN == BLOCK_NIL) {
722 DEBUG(MTD_DEBUG_LEVEL2, "INFTL: Empty VUC %d for deletion was already absent\n", thisEUN); 717 pr_debug("INFTL: Empty VUC %d for deletion was already absent\n", thisEUN);
723 return; 718 return;
724 } 719 }
725 720
@@ -731,7 +726,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
731 thisEUN = *prevEUN; 726 thisEUN = *prevEUN;
732 } 727 }
733 728
734 DEBUG(MTD_DEBUG_LEVEL3, "Deleting EUN %d from VUC %d\n", 729 pr_debug("Deleting EUN %d from VUC %d\n",
735 thisEUN, thisVUC); 730 thisEUN, thisVUC);
736 731
737 if (INFTL_formatblock(inftl, thisEUN) < 0) { 732 if (INFTL_formatblock(inftl, thisEUN) < 0) {
@@ -767,7 +762,7 @@ static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block)
767 size_t retlen; 762 size_t retlen;
768 struct inftl_bci bci; 763 struct inftl_bci bci;
769 764
770 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=%p," 765 pr_debug("INFTL: INFTL_deleteblock(inftl=%p,"
771 "block=%d)\n", inftl, block); 766 "block=%d)\n", inftl, block);
772 767
773 while (thisEUN < inftl->nb_blocks) { 768 while (thisEUN < inftl->nb_blocks) {
@@ -826,7 +821,7 @@ static int inftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
826 struct inftl_oob oob; 821 struct inftl_oob oob;
827 char *p, *pend; 822 char *p, *pend;
828 823
829 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=%p,block=%ld," 824 pr_debug("INFTL: inftl_writeblock(inftl=%p,block=%ld,"
830 "buffer=%p)\n", inftl, block, buffer); 825 "buffer=%p)\n", inftl, block, buffer);
831 826
832 /* Is block all zero? */ 827 /* Is block all zero? */
@@ -876,7 +871,7 @@ static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
876 struct inftl_bci bci; 871 struct inftl_bci bci;
877 size_t retlen; 872 size_t retlen;
878 873
879 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld," 874 pr_debug("INFTL: inftl_readblock(inftl=%p,block=%ld,"
880 "buffer=%p)\n", inftl, block, buffer); 875 "buffer=%p)\n", inftl, block, buffer);
881 876
882 while (thisEUN < inftl->nb_blocks) { 877 while (thisEUN < inftl->nb_blocks) {
@@ -922,7 +917,7 @@ foundit:
922 int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer); 917 int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer);
923 918
924 /* Handle corrected bit flips gracefully */ 919 /* Handle corrected bit flips gracefully */
925 if (ret < 0 && ret != -EUCLEAN) 920 if (ret < 0 && !mtd_is_bitflip(ret))
926 return -EIO; 921 return -EIO;
927 } 922 }
928 return 0; 923 return 0;
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 104052e774b0..2ff601f816ce 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -53,7 +53,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
53 struct INFTLPartition *ip; 53 struct INFTLPartition *ip;
54 size_t retlen; 54 size_t retlen;
55 55
56 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl); 56 pr_debug("INFTL: find_boot_record(inftl=%p)\n", inftl);
57 57
58 /* 58 /*
59 * Assume logical EraseSize == physical erasesize for starting the 59 * Assume logical EraseSize == physical erasesize for starting the
@@ -139,24 +139,20 @@ static int find_boot_record(struct INFTLrecord *inftl)
139 mh->FormatFlags = le32_to_cpu(mh->FormatFlags); 139 mh->FormatFlags = le32_to_cpu(mh->FormatFlags);
140 mh->PercentUsed = le32_to_cpu(mh->PercentUsed); 140 mh->PercentUsed = le32_to_cpu(mh->PercentUsed);
141 141
142#ifdef CONFIG_MTD_DEBUG_VERBOSE 142 pr_debug("INFTL: Media Header ->\n"
143 if (CONFIG_MTD_DEBUG_VERBOSE >= 2) { 143 " bootRecordID = %s\n"
144 printk("INFTL: Media Header ->\n" 144 " NoOfBootImageBlocks = %d\n"
145 " bootRecordID = %s\n" 145 " NoOfBinaryPartitions = %d\n"
146 " NoOfBootImageBlocks = %d\n" 146 " NoOfBDTLPartitions = %d\n"
147 " NoOfBinaryPartitions = %d\n" 147 " BlockMultiplerBits = %d\n"
148 " NoOfBDTLPartitions = %d\n" 148 " FormatFlgs = %d\n"
149 " BlockMultiplerBits = %d\n" 149 " OsakVersion = 0x%x\n"
150 " FormatFlgs = %d\n" 150 " PercentUsed = %d\n",
151 " OsakVersion = 0x%x\n" 151 mh->bootRecordID, mh->NoOfBootImageBlocks,
152 " PercentUsed = %d\n", 152 mh->NoOfBinaryPartitions,
153 mh->bootRecordID, mh->NoOfBootImageBlocks, 153 mh->NoOfBDTLPartitions,
154 mh->NoOfBinaryPartitions, 154 mh->BlockMultiplierBits, mh->FormatFlags,
155 mh->NoOfBDTLPartitions, 155 mh->OsakVersion, mh->PercentUsed);
156 mh->BlockMultiplierBits, mh->FormatFlags,
157 mh->OsakVersion, mh->PercentUsed);
158 }
159#endif
160 156
161 if (mh->NoOfBDTLPartitions == 0) { 157 if (mh->NoOfBDTLPartitions == 0) {
162 printk(KERN_WARNING "INFTL: Media Header sanity check " 158 printk(KERN_WARNING "INFTL: Media Header sanity check "
@@ -200,19 +196,15 @@ static int find_boot_record(struct INFTLrecord *inftl)
200 ip->spareUnits = le32_to_cpu(ip->spareUnits); 196 ip->spareUnits = le32_to_cpu(ip->spareUnits);
201 ip->Reserved0 = le32_to_cpu(ip->Reserved0); 197 ip->Reserved0 = le32_to_cpu(ip->Reserved0);
202 198
203#ifdef CONFIG_MTD_DEBUG_VERBOSE 199 pr_debug(" PARTITION[%d] ->\n"
204 if (CONFIG_MTD_DEBUG_VERBOSE >= 2) { 200 " virtualUnits = %d\n"
205 printk(" PARTITION[%d] ->\n" 201 " firstUnit = %d\n"
206 " virtualUnits = %d\n" 202 " lastUnit = %d\n"
207 " firstUnit = %d\n" 203 " flags = 0x%x\n"
208 " lastUnit = %d\n" 204 " spareUnits = %d\n",
209 " flags = 0x%x\n" 205 i, ip->virtualUnits, ip->firstUnit,
210 " spareUnits = %d\n", 206 ip->lastUnit, ip->flags,
211 i, ip->virtualUnits, ip->firstUnit, 207 ip->spareUnits);
212 ip->lastUnit, ip->flags,
213 ip->spareUnits);
214 }
215#endif
216 208
217 if (ip->Reserved0 != ip->firstUnit) { 209 if (ip->Reserved0 != ip->firstUnit) {
218 struct erase_info *instr = &inftl->instr; 210 struct erase_info *instr = &inftl->instr;
@@ -375,7 +367,7 @@ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
375 * 367 *
376 * Return: 0 when succeed, -1 on error. 368 * Return: 0 when succeed, -1 on error.
377 * 369 *
378 * ToDo: 1. Is it neceressary to check_free_sector after erasing ?? 370 * ToDo: 1. Is it necessary to check_free_sector after erasing ??
379 */ 371 */
380int INFTL_formatblock(struct INFTLrecord *inftl, int block) 372int INFTL_formatblock(struct INFTLrecord *inftl, int block)
381{ 373{
@@ -385,8 +377,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
385 struct mtd_info *mtd = inftl->mbd.mtd; 377 struct mtd_info *mtd = inftl->mbd.mtd;
386 int physblock; 378 int physblock;
387 379
388 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p," 380 pr_debug("INFTL: INFTL_formatblock(inftl=%p,block=%d)\n", inftl, block);
389 "block=%d)\n", inftl, block);
390 381
391 memset(instr, 0, sizeof(struct erase_info)); 382 memset(instr, 0, sizeof(struct erase_info));
392 383
@@ -476,30 +467,30 @@ void INFTL_dumptables(struct INFTLrecord *s)
476{ 467{
477 int i; 468 int i;
478 469
479 printk("-------------------------------------------" 470 pr_debug("-------------------------------------------"
480 "----------------------------------\n"); 471 "----------------------------------\n");
481 472
482 printk("VUtable[%d] ->", s->nb_blocks); 473 pr_debug("VUtable[%d] ->", s->nb_blocks);
483 for (i = 0; i < s->nb_blocks; i++) { 474 for (i = 0; i < s->nb_blocks; i++) {
484 if ((i % 8) == 0) 475 if ((i % 8) == 0)
485 printk("\n%04x: ", i); 476 pr_debug("\n%04x: ", i);
486 printk("%04x ", s->VUtable[i]); 477 pr_debug("%04x ", s->VUtable[i]);
487 } 478 }
488 479
489 printk("\n-------------------------------------------" 480 pr_debug("\n-------------------------------------------"
490 "----------------------------------\n"); 481 "----------------------------------\n");
491 482
492 printk("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks); 483 pr_debug("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks);
493 for (i = 0; i <= s->lastEUN; i++) { 484 for (i = 0; i <= s->lastEUN; i++) {
494 if ((i % 8) == 0) 485 if ((i % 8) == 0)
495 printk("\n%04x: ", i); 486 pr_debug("\n%04x: ", i);
496 printk("%04x ", s->PUtable[i]); 487 pr_debug("%04x ", s->PUtable[i]);
497 } 488 }
498 489
499 printk("\n-------------------------------------------" 490 pr_debug("\n-------------------------------------------"
500 "----------------------------------\n"); 491 "----------------------------------\n");
501 492
502 printk("INFTL ->\n" 493 pr_debug("INFTL ->\n"
503 " EraseSize = %d\n" 494 " EraseSize = %d\n"
504 " h/s/c = %d/%d/%d\n" 495 " h/s/c = %d/%d/%d\n"
505 " numvunits = %d\n" 496 " numvunits = %d\n"
@@ -513,7 +504,7 @@ void INFTL_dumptables(struct INFTLrecord *s)
513 s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs, 504 s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs,
514 s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks); 505 s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks);
515 506
516 printk("\n-------------------------------------------" 507 pr_debug("\n-------------------------------------------"
517 "----------------------------------\n"); 508 "----------------------------------\n");
518} 509}
519 510
@@ -521,25 +512,25 @@ void INFTL_dumpVUchains(struct INFTLrecord *s)
521{ 512{
522 int logical, block, i; 513 int logical, block, i;
523 514
524 printk("-------------------------------------------" 515 pr_debug("-------------------------------------------"
525 "----------------------------------\n"); 516 "----------------------------------\n");
526 517
527 printk("INFTL Virtual Unit Chains:\n"); 518 pr_debug("INFTL Virtual Unit Chains:\n");
528 for (logical = 0; logical < s->nb_blocks; logical++) { 519 for (logical = 0; logical < s->nb_blocks; logical++) {
529 block = s->VUtable[logical]; 520 block = s->VUtable[logical];
530 if (block > s->nb_blocks) 521 if (block > s->nb_blocks)
531 continue; 522 continue;
532 printk(" LOGICAL %d --> %d ", logical, block); 523 pr_debug(" LOGICAL %d --> %d ", logical, block);
533 for (i = 0; i < s->nb_blocks; i++) { 524 for (i = 0; i < s->nb_blocks; i++) {
534 if (s->PUtable[block] == BLOCK_NIL) 525 if (s->PUtable[block] == BLOCK_NIL)
535 break; 526 break;
536 block = s->PUtable[block]; 527 block = s->PUtable[block];
537 printk("%d ", block); 528 pr_debug("%d ", block);
538 } 529 }
539 printk("\n"); 530 pr_debug("\n");
540 } 531 }
541 532
542 printk("-------------------------------------------" 533 pr_debug("-------------------------------------------"
543 "----------------------------------\n"); 534 "----------------------------------\n");
544} 535}
545 536
@@ -555,7 +546,7 @@ int INFTL_mount(struct INFTLrecord *s)
555 int i; 546 int i;
556 u8 *ANACtable, ANAC; 547 u8 *ANACtable, ANAC;
557 548
558 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=%p)\n", s); 549 pr_debug("INFTL: INFTL_mount(inftl=%p)\n", s);
559 550
560 /* Search for INFTL MediaHeader and Spare INFTL Media Header */ 551 /* Search for INFTL MediaHeader and Spare INFTL Media Header */
561 if (find_boot_record(s) < 0) { 552 if (find_boot_record(s) < 0) {
@@ -585,7 +576,7 @@ int INFTL_mount(struct INFTLrecord *s)
585 * NOTEXPLORED state. Then at the end we will try to format it and 576 * NOTEXPLORED state. Then at the end we will try to format it and
586 * mark it as free. 577 * mark it as free.
587 */ 578 */
588 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 1, explore each unit\n"); 579 pr_debug("INFTL: pass 1, explore each unit\n");
589 for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) { 580 for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) {
590 if (s->PUtable[first_block] != BLOCK_NOTEXPLORED) 581 if (s->PUtable[first_block] != BLOCK_NOTEXPLORED)
591 continue; 582 continue;
@@ -717,17 +708,14 @@ int INFTL_mount(struct INFTLrecord *s)
717 logical_block = BLOCK_NIL; 708 logical_block = BLOCK_NIL;
718 } 709 }
719 710
720#ifdef CONFIG_MTD_DEBUG_VERBOSE 711 INFTL_dumptables(s);
721 if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
722 INFTL_dumptables(s);
723#endif
724 712
725 /* 713 /*
726 * Second pass, check for infinite loops in chains. These are 714 * Second pass, check for infinite loops in chains. These are
727 * possible because we don't update the previous pointers when 715 * possible because we don't update the previous pointers when
728 * we fold chains. No big deal, just fix them up in PUtable. 716 * we fold chains. No big deal, just fix them up in PUtable.
729 */ 717 */
730 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 2, validate virtual chains\n"); 718 pr_debug("INFTL: pass 2, validate virtual chains\n");
731 for (logical_block = 0; logical_block < s->numvunits; logical_block++) { 719 for (logical_block = 0; logical_block < s->numvunits; logical_block++) {
732 block = s->VUtable[logical_block]; 720 block = s->VUtable[logical_block];
733 last_block = BLOCK_NIL; 721 last_block = BLOCK_NIL;
@@ -772,12 +760,8 @@ int INFTL_mount(struct INFTLrecord *s)
772 } 760 }
773 } 761 }
774 762
775#ifdef CONFIG_MTD_DEBUG_VERBOSE 763 INFTL_dumptables(s);
776 if (CONFIG_MTD_DEBUG_VERBOSE >= 2) 764 INFTL_dumpVUchains(s);
777 INFTL_dumptables(s);
778 if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
779 INFTL_dumpVUchains(s);
780#endif
781 765
782 /* 766 /*
783 * Third pass, format unreferenced blocks and init free block count. 767 * Third pass, format unreferenced blocks and init free block count.
@@ -785,7 +769,7 @@ int INFTL_mount(struct INFTLrecord *s)
785 s->numfreeEUNs = 0; 769 s->numfreeEUNs = 0;
786 s->LastFreeEUN = BLOCK_NIL; 770 s->LastFreeEUN = BLOCK_NIL;
787 771
788 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 3, format unused blocks\n"); 772 pr_debug("INFTL: pass 3, format unused blocks\n");
789 for (block = s->firstEUN; block <= s->lastEUN; block++) { 773 for (block = s->firstEUN; block <= s->lastEUN; block++) {
790 if (s->PUtable[block] == BLOCK_NOTEXPLORED) { 774 if (s->PUtable[block] == BLOCK_NOTEXPLORED) {
791 printk("INFTL: unreferenced block %d, formatting it\n", 775 printk("INFTL: unreferenced block %d, formatting it\n",
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index c0c328c5b133..8e0c4bf9f7fb 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -41,8 +41,6 @@ config MTD_PHYSMAP_START
41 are mapped on your particular target board. Refer to the 41 are mapped on your particular target board. Refer to the
42 memory map which should hopefully be in the documentation for 42 memory map which should hopefully be in the documentation for
43 your board. 43 your board.
44 Ignore this option if you use run-time physmap configuration
45 (i.e., run-time calling physmap_configure()).
46 44
47config MTD_PHYSMAP_LEN 45config MTD_PHYSMAP_LEN
48 hex "Physical length of flash mapping" 46 hex "Physical length of flash mapping"
@@ -55,8 +53,6 @@ config MTD_PHYSMAP_LEN
55 than the total amount of flash present. Refer to the memory 53 than the total amount of flash present. Refer to the memory
56 map which should hopefully be in the documentation for your 54 map which should hopefully be in the documentation for your
57 board. 55 board.
58 Ignore this option if you use run-time physmap configuration
59 (i.e., run-time calling physmap_configure()).
60 56
61config MTD_PHYSMAP_BANKWIDTH 57config MTD_PHYSMAP_BANKWIDTH
62 int "Bank width in octets" 58 int "Bank width in octets"
@@ -67,8 +63,6 @@ config MTD_PHYSMAP_BANKWIDTH
67 in octets. For example, if you have a data bus width of 32 63 in octets. For example, if you have a data bus width of 32
68 bits, you would set the bus width octet value to 4. This is 64 bits, you would set the bus width octet value to 4. This is
69 used internally by the CFI drivers. 65 used internally by the CFI drivers.
70 Ignore this option if you use run-time physmap configuration
71 (i.e., run-time calling physmap_configure()).
72 66
73config MTD_PHYSMAP_OF 67config MTD_PHYSMAP_OF
74 tristate "Flash device in physical memory map based on OF description" 68 tristate "Flash device in physical memory map based on OF description"
@@ -260,7 +254,6 @@ config MTD_BCM963XX
260config MTD_LANTIQ 254config MTD_LANTIQ
261 tristate "Lantiq SoC NOR support" 255 tristate "Lantiq SoC NOR support"
262 depends on LANTIQ 256 depends on LANTIQ
263 select MTD_PARTITIONS
264 help 257 help
265 Support for NOR flash attached to the Lantiq SoC's External Bus Unit. 258 Support for NOR flash attached to the Lantiq SoC's External Bus Unit.
266 259
@@ -339,10 +332,6 @@ config MTD_SOLUTIONENGINE
339 This enables access to the flash chips on the Hitachi SolutionEngine and 332 This enables access to the flash chips on the Hitachi SolutionEngine and
340 similar boards. Say 'Y' if you are building a kernel for such a board. 333 similar boards. Say 'Y' if you are building a kernel for such a board.
341 334
342config MTD_ARM_INTEGRATOR
343 tristate "CFI Flash device mapped on ARM Integrator/P720T"
344 depends on ARM && MTD_CFI
345
346config MTD_CDB89712 335config MTD_CDB89712
347 tristate "Cirrus CDB89712 evaluation board mappings" 336 tristate "Cirrus CDB89712 evaluation board mappings"
348 depends on MTD_CFI && ARCH_CDB89712 337 depends on MTD_CFI && ARCH_CDB89712
@@ -398,13 +387,6 @@ config MTD_AUTCPU12
398 This enables access to the NV-RAM on autronix autcpu12 board. 387 This enables access to the NV-RAM on autronix autcpu12 board.
399 If you have such a board, say 'Y'. 388 If you have such a board, say 'Y'.
400 389
401config MTD_EDB7312
402 tristate "CFI Flash device mapped on EDB7312"
403 depends on ARCH_EDB7312 && MTD_CFI
404 help
405 This enables access to the CFI Flash on the Cogent EDB7312 board.
406 If you have such a board, say 'Y' here.
407
408config MTD_IMPA7 390config MTD_IMPA7
409 tristate "JEDEC Flash device mapped on impA7" 391 tristate "JEDEC Flash device mapped on impA7"
410 depends on ARM && MTD_JEDECPROBE 392 depends on ARM && MTD_JEDECPROBE
@@ -412,14 +394,6 @@ config MTD_IMPA7
412 This enables access to the NOR Flash on the impA7 board of 394 This enables access to the NOR Flash on the impA7 board of
413 implementa GmbH. If you have such a board, say 'Y' here. 395 implementa GmbH. If you have such a board, say 'Y' here.
414 396
415config MTD_CEIVA
416 tristate "JEDEC Flash device mapped on Ceiva/Polaroid PhotoMax Digital Picture Frame"
417 depends on MTD_JEDECPROBE && ARCH_CEIVA
418 help
419 This enables access to the flash chips on the Ceiva/Polaroid
420 PhotoMax Digital Picture Frame.
421 If you have such a device, say 'Y'.
422
423config MTD_H720X 397config MTD_H720X
424 tristate "Hynix evaluation board mappings" 398 tristate "Hynix evaluation board mappings"
425 depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 ) 399 depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 )
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index cb48b11affff..45dcb8b14f22 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
19obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o 19obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
20obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o 20obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
21obj-$(CONFIG_MTD_MBX860) += mbx860.o 21obj-$(CONFIG_MTD_MBX860) += mbx860.o
22obj-$(CONFIG_MTD_CEIVA) += ceiva.o
23obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o 22obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
24obj-$(CONFIG_MTD_PHYSMAP) += physmap.o 23obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
25obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o 24obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
@@ -40,7 +39,6 @@ obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o
40obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o 39obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
41obj-$(CONFIG_MTD_PCI) += pci.o 40obj-$(CONFIG_MTD_PCI) += pci.o
42obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o 41obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
43obj-$(CONFIG_MTD_EDB7312) += edb7312.o
44obj-$(CONFIG_MTD_IMPA7) += impa7.o 42obj-$(CONFIG_MTD_IMPA7) += impa7.o
45obj-$(CONFIG_MTD_FORTUNET) += fortunet.o 43obj-$(CONFIG_MTD_FORTUNET) += fortunet.o
46obj-$(CONFIG_MTD_UCLINUX) += uclinux.o 44obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index 608967fe74c6..736ca10ca9f1 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/module.h>
24#include <linux/mtd/map.h> 25#include <linux/mtd/map.h>
25#include <linux/mtd/mtd.h> 26#include <linux/mtd/mtd.h>
26#include <linux/mtd/partitions.h> 27#include <linux/mtd/partitions.h>
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 67815eed2f00..6d6b2b5674ee 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -41,7 +41,6 @@ struct async_state {
41 uint32_t flash_ambctl0, flash_ambctl1; 41 uint32_t flash_ambctl0, flash_ambctl1;
42 uint32_t save_ambctl0, save_ambctl1; 42 uint32_t save_ambctl0, save_ambctl1;
43 unsigned long irq_flags; 43 unsigned long irq_flags;
44 struct mtd_partition *parts;
45}; 44};
46 45
47static void switch_to_flash(struct async_state *state) 46static void switch_to_flash(struct async_state *state)
@@ -165,18 +164,8 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
165 return -ENXIO; 164 return -ENXIO;
166 } 165 }
167 166
168 ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0); 167 mtd_device_parse_register(state->mtd, part_probe_types, 0,
169 if (ret > 0) { 168 pdata->parts, pdata->nr_parts);
170 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
171 mtd_device_register(state->mtd, pdata->parts, ret);
172 state->parts = pdata->parts;
173 } else if (pdata->nr_parts) {
174 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
175 mtd_device_register(state->mtd, pdata->parts, pdata->nr_parts);
176 } else {
177 pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n");
178 mtd_device_register(state->mtd, NULL, 0);
179 }
180 169
181 platform_set_drvdata(pdev, state); 170 platform_set_drvdata(pdev, state);
182 171
@@ -188,7 +177,6 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
188 struct async_state *state = platform_get_drvdata(pdev); 177 struct async_state *state = platform_get_drvdata(pdev);
189 gpio_free(state->enet_flash_pin); 178 gpio_free(state->enet_flash_pin);
190 mtd_device_unregister(state->mtd); 179 mtd_device_unregister(state->mtd);
191 kfree(state->parts);
192 map_destroy(state->mtd); 180 map_destroy(state->mtd);
193 kfree(state); 181 kfree(state);
194 return 0; 182 return 0;
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
deleted file mode 100644
index 06f9c9815720..000000000000
--- a/drivers/mtd/maps/ceiva.c
+++ /dev/null
@@ -1,341 +0,0 @@
1/*
2 * Ceiva flash memory driver.
3 * Copyright (C) 2002 Rob Scott <rscott@mtrob.fdns.net>
4 *
5 * Note: this driver supports jedec compatible devices. Modification
6 * for CFI compatible devices should be straight forward: change
7 * jedec_probe to cfi_probe.
8 *
9 * Based on: sa1100-flash.c, which has the following copyright:
10 * Flash memory access on SA11x0 based devices
11 *
12 * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/ioport.h>
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/slab.h>
22
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/map.h>
25#include <linux/mtd/partitions.h>
26#include <linux/mtd/concat.h>
27
28#include <mach/hardware.h>
29#include <asm/mach-types.h>
30#include <asm/io.h>
31#include <asm/sizes.h>
32
33/*
34 * This isn't complete yet, so...
35 */
36#define CONFIG_MTD_CEIVA_STATICMAP
37
38#ifdef CONFIG_MTD_CEIVA_STATICMAP
39/*
40 * See include/linux/mtd/partitions.h for definition of the mtd_partition
41 * structure.
42 *
43 * Please note:
44 * 1. The flash size given should be the largest flash size that can
45 * be accommodated.
46 *
47 * 2. The bus width must defined in clps_setup_flash.
48 *
49 * The MTD layer will detect flash chip aliasing and reduce the size of
50 * the map accordingly.
51 *
52 */
53
54#ifdef CONFIG_ARCH_CEIVA
55/* Flash / Partition sizing */
56/* For the 28F8003, we use the block mapping to calcuate the sizes */
57#define MAX_SIZE_KiB (16 + 8 + 8 + 96 + (7*128))
58#define BOOT_PARTITION_SIZE_KiB (16)
59#define PARAMS_PARTITION_SIZE_KiB (8)
60#define KERNEL_PARTITION_SIZE_KiB (4*128)
61/* Use both remaining portion of first flash, and all of second flash */
62#define ROOT_PARTITION_SIZE_KiB (3*128) + (8*128)
63
64static struct mtd_partition ceiva_partitions[] = {
65 {
66 .name = "Ceiva BOOT partition",
67 .size = BOOT_PARTITION_SIZE_KiB*1024,
68 .offset = 0,
69
70 },{
71 .name = "Ceiva parameters partition",
72 .size = PARAMS_PARTITION_SIZE_KiB*1024,
73 .offset = (16 + 8) * 1024,
74 },{
75 .name = "Ceiva kernel partition",
76 .size = (KERNEL_PARTITION_SIZE_KiB)*1024,
77 .offset = 0x20000,
78
79 },{
80 .name = "Ceiva root filesystem partition",
81 .offset = MTDPART_OFS_APPEND,
82 .size = (ROOT_PARTITION_SIZE_KiB)*1024,
83 }
84};
85#endif
86
87static int __init clps_static_partitions(struct mtd_partition **parts)
88{
89 int nb_parts = 0;
90
91#ifdef CONFIG_ARCH_CEIVA
92 if (machine_is_ceiva()) {
93 *parts = ceiva_partitions;
94 nb_parts = ARRAY_SIZE(ceiva_partitions);
95 }
96#endif
97 return nb_parts;
98}
99#endif
100
101struct clps_info {
102 unsigned long base;
103 unsigned long size;
104 int width;
105 void *vbase;
106 struct map_info *map;
107 struct mtd_info *mtd;
108 struct resource *res;
109};
110
111#define NR_SUBMTD 4
112
113static struct clps_info info[NR_SUBMTD];
114
115static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info **rmtd)
116{
117 struct mtd_info *subdev[nr];
118 struct map_info *maps;
119 int i, found = 0, ret = 0;
120
121 /*
122 * Allocate the map_info structs in one go.
123 */
124 maps = kzalloc(sizeof(struct map_info) * nr, GFP_KERNEL);
125 if (!maps)
126 return -ENOMEM;
127 /*
128 * Claim and then map the memory regions.
129 */
130 for (i = 0; i < nr; i++) {
131 if (clps[i].base == (unsigned long)-1)
132 break;
133
134 clps[i].res = request_mem_region(clps[i].base, clps[i].size, "clps flash");
135 if (!clps[i].res) {
136 ret = -EBUSY;
137 break;
138 }
139
140 clps[i].map = maps + i;
141
142 clps[i].map->name = "clps flash";
143 clps[i].map->phys = clps[i].base;
144
145 clps[i].vbase = ioremap(clps[i].base, clps[i].size);
146 if (!clps[i].vbase) {
147 ret = -ENOMEM;
148 break;
149 }
150
151 clps[i].map->virt = (void __iomem *)clps[i].vbase;
152 clps[i].map->bankwidth = clps[i].width;
153 clps[i].map->size = clps[i].size;
154
155 simple_map_init(&clps[i].map);
156
157 clps[i].mtd = do_map_probe("jedec_probe", clps[i].map);
158 if (clps[i].mtd == NULL) {
159 ret = -ENXIO;
160 break;
161 }
162 clps[i].mtd->owner = THIS_MODULE;
163 subdev[i] = clps[i].mtd;
164
165 printk(KERN_INFO "clps flash: JEDEC device at 0x%08lx, %dMiB, "
166 "%d-bit\n", clps[i].base, clps[i].mtd->size >> 20,
167 clps[i].width * 8);
168 found += 1;
169 }
170
171 /*
172 * ENXIO is special. It means we didn't find a chip when
173 * we probed. We need to tear down the mapping, free the
174 * resource and mark it as such.
175 */
176 if (ret == -ENXIO) {
177 iounmap(clps[i].vbase);
178 clps[i].vbase = NULL;
179 release_resource(clps[i].res);
180 clps[i].res = NULL;
181 }
182
183 /*
184 * If we found one device, don't bother with concat support.
185 * If we found multiple devices, use concat if we have it
186 * available, otherwise fail.
187 */
188 if (ret == 0 || ret == -ENXIO) {
189 if (found == 1) {
190 *rmtd = subdev[0];
191 ret = 0;
192 } else if (found > 1) {
193 /*
194 * We detected multiple devices. Concatenate
195 * them together.
196 */
197 *rmtd = mtd_concat_create(subdev, found,
198 "clps flash");
199 if (*rmtd == NULL)
200 ret = -ENXIO;
201 }
202 }
203
204 /*
205 * If we failed, clean up.
206 */
207 if (ret) {
208 do {
209 if (clps[i].mtd)
210 map_destroy(clps[i].mtd);
211 if (clps[i].vbase)
212 iounmap(clps[i].vbase);
213 if (clps[i].res)
214 release_resource(clps[i].res);
215 } while (i--);
216
217 kfree(maps);
218 }
219
220 return ret;
221}
222
223static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd)
224{
225 int i;
226
227 mtd_device_unregister(mtd);
228
229 if (mtd != clps[0].mtd)
230 mtd_concat_destroy(mtd);
231
232 for (i = NR_SUBMTD; i >= 0; i--) {
233 if (clps[i].mtd)
234 map_destroy(clps[i].mtd);
235 if (clps[i].vbase)
236 iounmap(clps[i].vbase);
237 if (clps[i].res)
238 release_resource(clps[i].res);
239 }
240 kfree(clps[0].map);
241}
242
243/*
244 * We define the memory space, size, and width for the flash memory
245 * space here.
246 */
247
248static int __init clps_setup_flash(void)
249{
250 int nr = 0;
251
252#ifdef CONFIG_ARCH_CEIVA
253 if (machine_is_ceiva()) {
254 info[0].base = CS0_PHYS_BASE;
255 info[0].size = SZ_32M;
256 info[0].width = CEIVA_FLASH_WIDTH;
257 info[1].base = CS1_PHYS_BASE;
258 info[1].size = SZ_32M;
259 info[1].width = CEIVA_FLASH_WIDTH;
260 nr = 2;
261 }
262#endif
263 return nr;
264}
265
266static struct mtd_partition *parsed_parts;
267static const char *probes[] = { "cmdlinepart", "RedBoot", NULL };
268
269static void __init clps_locate_partitions(struct mtd_info *mtd)
270{
271 const char *part_type = NULL;
272 int nr_parts = 0;
273 do {
274 /*
275 * Partition selection stuff.
276 */
277 nr_parts = parse_mtd_partitions(mtd, probes, &parsed_parts, 0);
278 if (nr_parts > 0) {
279 part_type = "command line";
280 break;
281 }
282#ifdef CONFIG_MTD_CEIVA_STATICMAP
283 nr_parts = clps_static_partitions(&parsed_parts);
284 if (nr_parts > 0) {
285 part_type = "static";
286 break;
287 }
288 printk("found: %d partitions\n", nr_parts);
289#endif
290 } while (0);
291
292 if (nr_parts == 0) {
293 printk(KERN_NOTICE "clps flash: no partition info "
294 "available, registering whole flash\n");
295 mtd_device_register(mtd, NULL, 0);
296 } else {
297 printk(KERN_NOTICE "clps flash: using %s partition "
298 "definition\n", part_type);
299 mtd_device_register(mtd, parsed_parts, nr_parts);
300 }
301
302 /* Always succeeds. */
303}
304
305static void __exit clps_destroy_partitions(void)
306{
307 kfree(parsed_parts);
308}
309
310static struct mtd_info *mymtd;
311
312static int __init clps_mtd_init(void)
313{
314 int ret;
315 int nr;
316
317 nr = clps_setup_flash();
318 if (nr < 0)
319 return nr;
320
321 ret = clps_setup_mtd(info, nr, &mymtd);
322 if (ret)
323 return ret;
324
325 clps_locate_partitions(mymtd);
326
327 return 0;
328}
329
330static void __exit clps_mtd_cleanup(void)
331{
332 clps_destroy_mtd(info, mymtd);
333 clps_destroy_partitions();
334}
335
336module_init(clps_mtd_init);
337module_exit(clps_mtd_cleanup);
338
339MODULE_AUTHOR("Rob Scott");
340MODULE_DESCRIPTION("Cirrus Logic JEDEC map driver");
341MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index 7a9e1989c977..f43b365b848c 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -145,14 +145,10 @@ static struct map_info dc21285_map = {
145 145
146 146
147/* Partition stuff */ 147/* Partition stuff */
148static struct mtd_partition *dc21285_parts;
149static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; 148static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
150 149
151static int __init init_dc21285(void) 150static int __init init_dc21285(void)
152{ 151{
153
154 int nrparts;
155
156 /* Determine bankwidth */ 152 /* Determine bankwidth */
157 switch (*CSR_SA110_CNTL & (3<<14)) { 153 switch (*CSR_SA110_CNTL & (3<<14)) {
158 case SA110_CNTL_ROMWIDTH_8: 154 case SA110_CNTL_ROMWIDTH_8:
@@ -200,8 +196,7 @@ static int __init init_dc21285(void)
200 196
201 dc21285_mtd->owner = THIS_MODULE; 197 dc21285_mtd->owner = THIS_MODULE;
202 198
203 nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0); 199 mtd_device_parse_register(dc21285_mtd, probes, 0, NULL, 0);
204 mtd_device_register(dc21285_mtd, dc21285_parts, nrparts);
205 200
206 if(machine_is_ebsa285()) { 201 if(machine_is_ebsa285()) {
207 /* 202 /*
@@ -224,8 +219,6 @@ static int __init init_dc21285(void)
224static void __exit cleanup_dc21285(void) 219static void __exit cleanup_dc21285(void)
225{ 220{
226 mtd_device_unregister(dc21285_mtd); 221 mtd_device_unregister(dc21285_mtd);
227 if (dc21285_parts)
228 kfree(dc21285_parts);
229 map_destroy(dc21285_mtd); 222 map_destroy(dc21285_mtd);
230 iounmap(dc21285_map.virt); 223 iounmap(dc21285_map.virt);
231} 224}
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
deleted file mode 100644
index fe42a212bb3e..000000000000
--- a/drivers/mtd/maps/edb7312.c
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 * Handle mapping of the NOR flash on Cogent EDB7312 boards
3 *
4 * Copyright 2002 SYSGO Real-Time Solutions GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/types.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <asm/io.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h>
19
20#define WINDOW_ADDR 0x00000000 /* physical properties of flash */
21#define WINDOW_SIZE 0x01000000
22#define BUSWIDTH 2
23#define FLASH_BLOCKSIZE_MAIN 0x20000
24#define FLASH_NUMBLOCKS_MAIN 128
25/* can be "cfi_probe", "jedec_probe", "map_rom", NULL }; */
26#define PROBETYPES { "cfi_probe", NULL }
27
28#define MSG_PREFIX "EDB7312-NOR:" /* prefix for our printk()'s */
29#define MTDID "edb7312-nor" /* for mtdparts= partitioning */
30
31static struct mtd_info *mymtd;
32
33struct map_info edb7312nor_map = {
34 .name = "NOR flash on EDB7312",
35 .size = WINDOW_SIZE,
36 .bankwidth = BUSWIDTH,
37 .phys = WINDOW_ADDR,
38};
39
40/*
41 * MTD partitioning stuff
42 */
43static struct mtd_partition static_partitions[3] =
44{
45 {
46 .name = "ARMboot",
47 .size = 0x40000,
48 .offset = 0
49 },
50 {
51 .name = "Kernel",
52 .size = 0x200000,
53 .offset = 0x40000
54 },
55 {
56 .name = "RootFS",
57 .size = 0xDC0000,
58 .offset = 0x240000
59 },
60};
61
62static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
63
64static int mtd_parts_nb = 0;
65static struct mtd_partition *mtd_parts = 0;
66
67static int __init init_edb7312nor(void)
68{
69 static const char *rom_probe_types[] = PROBETYPES;
70 const char **type;
71 const char *part_type = 0;
72
73 printk(KERN_NOTICE MSG_PREFIX "0x%08x at 0x%08x\n",
74 WINDOW_SIZE, WINDOW_ADDR);
75 edb7312nor_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
76
77 if (!edb7312nor_map.virt) {
78 printk(MSG_PREFIX "failed to ioremap\n");
79 return -EIO;
80 }
81
82 simple_map_init(&edb7312nor_map);
83
84 mymtd = 0;
85 type = rom_probe_types;
86 for(; !mymtd && *type; type++) {
87 mymtd = do_map_probe(*type, &edb7312nor_map);
88 }
89 if (mymtd) {
90 mymtd->owner = THIS_MODULE;
91
92 mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID);
93 if (mtd_parts_nb > 0)
94 part_type = "detected";
95
96 if (mtd_parts_nb == 0) {
97 mtd_parts = static_partitions;
98 mtd_parts_nb = ARRAY_SIZE(static_partitions);
99 part_type = "static";
100 }
101
102 if (mtd_parts_nb == 0)
103 printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
104 else
105 printk(KERN_NOTICE MSG_PREFIX
106 "using %s partition definition\n", part_type);
107 /* Register the whole device first. */
108 mtd_device_register(mymtd, NULL, 0);
109 mtd_device_register(mymtd, mtd_parts, mtd_parts_nb);
110 return 0;
111 }
112
113 iounmap((void *)edb7312nor_map.virt);
114 return -ENXIO;
115}
116
117static void __exit cleanup_edb7312nor(void)
118{
119 if (mymtd) {
120 mtd_device_unregister(mymtd);
121 map_destroy(mymtd);
122 }
123 if (edb7312nor_map.virt) {
124 iounmap((void *)edb7312nor_map.virt);
125 edb7312nor_map.virt = 0;
126 }
127}
128
129module_init(init_edb7312nor);
130module_exit(cleanup_edb7312nor);
131
132MODULE_LICENSE("GPL");
133MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
134MODULE_DESCRIPTION("Generic configurable MTD map driver");
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 7568c5f8b8ae..1ec66f031c51 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -187,7 +187,6 @@ static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
187 */ 187 */
188static int __devinit gpio_flash_probe(struct platform_device *pdev) 188static int __devinit gpio_flash_probe(struct platform_device *pdev)
189{ 189{
190 int nr_parts;
191 size_t i, arr_size; 190 size_t i, arr_size;
192 struct physmap_flash_data *pdata; 191 struct physmap_flash_data *pdata;
193 struct resource *memory; 192 struct resource *memory;
@@ -252,20 +251,9 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
252 return -ENXIO; 251 return -ENXIO;
253 } 252 }
254 253
255 nr_parts = parse_mtd_partitions(state->mtd, part_probe_types,
256 &pdata->parts, 0);
257 if (nr_parts > 0) {
258 pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n");
259 kfree(pdata->parts);
260 } else if (pdata->nr_parts) {
261 pr_devinit(KERN_NOTICE PFX "Using board partition definition\n");
262 nr_parts = pdata->nr_parts;
263 } else {
264 pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n");
265 nr_parts = 0;
266 }
267 254
268 mtd_device_register(state->mtd, pdata->parts, nr_parts); 255 mtd_device_parse_register(state->mtd, part_probe_types, 0,
256 pdata->parts, pdata->nr_parts);
269 257
270 return 0; 258 return 0;
271} 259}
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 7f035860a36b..49c14187fc66 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -58,18 +58,11 @@ static struct mtd_partition h720x_partitions[] = {
58 58
59#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions) 59#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions)
60 60
61static int nr_mtd_parts;
62static struct mtd_partition *mtd_parts;
63static const char *probes[] = { "cmdlinepart", NULL };
64
65/* 61/*
66 * Initialize FLASH support 62 * Initialize FLASH support
67 */ 63 */
68static int __init h720x_mtd_init(void) 64static int __init h720x_mtd_init(void)
69{ 65{
70
71 char *part_type = NULL;
72
73 h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size); 66 h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size);
74 67
75 if (!h720x_map.virt) { 68 if (!h720x_map.virt) {
@@ -92,16 +85,8 @@ static int __init h720x_mtd_init(void)
92 if (mymtd) { 85 if (mymtd) {
93 mymtd->owner = THIS_MODULE; 86 mymtd->owner = THIS_MODULE;
94 87
95 nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0); 88 mtd_device_parse_register(mymtd, NULL, 0,
96 if (nr_mtd_parts > 0) 89 h720x_partitions, NUM_PARTITIONS);
97 part_type = "command line";
98 if (nr_mtd_parts <= 0) {
99 mtd_parts = h720x_partitions;
100 nr_mtd_parts = NUM_PARTITIONS;
101 part_type = "builtin";
102 }
103 printk(KERN_INFO "Using %s partition table\n", part_type);
104 mtd_device_register(mymtd, mtd_parts, nr_mtd_parts);
105 return 0; 90 return 0;
106 } 91 }
107 92
@@ -120,10 +105,6 @@ static void __exit h720x_mtd_cleanup(void)
120 map_destroy(mymtd); 105 map_destroy(mymtd);
121 } 106 }
122 107
123 /* Free partition info, if commandline partition was used */
124 if (mtd_parts && (mtd_parts != h720x_partitions))
125 kfree (mtd_parts);
126
127 if (h720x_map.virt) { 108 if (h720x_map.virt) {
128 iounmap((void *)h720x_map.virt); 109 iounmap((void *)h720x_map.virt);
129 h720x_map.virt = 0; 110 h720x_map.virt = 0;
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 404a50cbafa0..f47aedb24366 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -49,7 +49,7 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
49/* 49/*
50 * MTD partitioning stuff 50 * MTD partitioning stuff
51 */ 51 */
52static struct mtd_partition static_partitions[] = 52static struct mtd_partition partitions[] =
53{ 53{
54 { 54 {
55 .name = "FileSystem", 55 .name = "FileSystem",
@@ -58,16 +58,10 @@ static struct mtd_partition static_partitions[] =
58 }, 58 },
59}; 59};
60 60
61static int mtd_parts_nb[NUM_FLASHBANKS];
62static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
63
64static const char *probes[] = { "cmdlinepart", NULL };
65
66static int __init init_impa7(void) 61static int __init init_impa7(void)
67{ 62{
68 static const char *rom_probe_types[] = PROBETYPES; 63 static const char *rom_probe_types[] = PROBETYPES;
69 const char **type; 64 const char **type;
70 const char *part_type = 0;
71 int i; 65 int i;
72 static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = { 66 static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
73 { WINDOW_ADDR0, WINDOW_SIZE0 }, 67 { WINDOW_ADDR0, WINDOW_SIZE0 },
@@ -97,23 +91,9 @@ static int __init init_impa7(void)
97 if (impa7_mtd[i]) { 91 if (impa7_mtd[i]) {
98 impa7_mtd[i]->owner = THIS_MODULE; 92 impa7_mtd[i]->owner = THIS_MODULE;
99 devicesfound++; 93 devicesfound++;
100 mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i], 94 mtd_device_parse_register(impa7_mtd[i], NULL, 0,
101 probes, 95 partitions,
102 &mtd_parts[i], 96 ARRAY_SIZE(partitions));
103 0);
104 if (mtd_parts_nb[i] > 0) {
105 part_type = "command line";
106 } else {
107 mtd_parts[i] = static_partitions;
108 mtd_parts_nb[i] = ARRAY_SIZE(static_partitions);
109 part_type = "static";
110 }
111
112 printk(KERN_NOTICE MSG_PREFIX
113 "using %s partition definition\n",
114 part_type);
115 mtd_device_register(impa7_mtd[i],
116 mtd_parts[i], mtd_parts_nb[i]);
117 } 97 }
118 else 98 else
119 iounmap((void *)impa7_map[i].virt); 99 iounmap((void *)impa7_map[i].virt);
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index d2f47be8754b..08c239604ee4 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -44,7 +44,6 @@ struct vr_nor_mtd {
44 void __iomem *csr_base; 44 void __iomem *csr_base;
45 struct map_info map; 45 struct map_info map;
46 struct mtd_info *info; 46 struct mtd_info *info;
47 int nr_parts;
48 struct pci_dev *dev; 47 struct pci_dev *dev;
49}; 48};
50 49
@@ -71,13 +70,9 @@ static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
71 70
72static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p) 71static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
73{ 72{
74 struct mtd_partition *parts;
75 static const char *part_probes[] = { "cmdlinepart", NULL };
76
77 /* register the flash bank */ 73 /* register the flash bank */
78 /* partition the flash bank */ 74 /* partition the flash bank */
79 p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0); 75 return mtd_device_parse_register(p->info, NULL, 0, NULL, 0);
80 return mtd_device_register(p->info, parts, p->nr_parts);
81} 76}
82 77
83static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) 78static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 1594a802631d..437fcd2f352f 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -38,7 +38,6 @@
38struct ixp2000_flash_info { 38struct ixp2000_flash_info {
39 struct mtd_info *mtd; 39 struct mtd_info *mtd;
40 struct map_info map; 40 struct map_info map;
41 struct mtd_partition *partitions;
42 struct resource *res; 41 struct resource *res;
43}; 42};
44 43
@@ -125,8 +124,6 @@ static int ixp2000_flash_remove(struct platform_device *dev)
125 if (info->map.map_priv_1) 124 if (info->map.map_priv_1)
126 iounmap((void *) info->map.map_priv_1); 125 iounmap((void *) info->map.map_priv_1);
127 126
128 kfree(info->partitions);
129
130 if (info->res) { 127 if (info->res) {
131 release_resource(info->res); 128 release_resource(info->res);
132 kfree(info->res); 129 kfree(info->res);
@@ -229,13 +226,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
229 } 226 }
230 info->mtd->owner = THIS_MODULE; 227 info->mtd->owner = THIS_MODULE;
231 228
232 err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0); 229 err = mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
233 if (err > 0) {
234 err = mtd_device_register(info->mtd, info->partitions, err);
235 if(err)
236 dev_err(&dev->dev, "Could not parse partitions\n");
237 }
238
239 if (err) 230 if (err)
240 goto Error; 231 goto Error;
241 232
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 155b21942f47..30409015a3de 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -145,7 +145,6 @@ static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
145struct ixp4xx_flash_info { 145struct ixp4xx_flash_info {
146 struct mtd_info *mtd; 146 struct mtd_info *mtd;
147 struct map_info map; 147 struct map_info map;
148 struct mtd_partition *partitions;
149 struct resource *res; 148 struct resource *res;
150}; 149};
151 150
@@ -168,8 +167,6 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
168 if (info->map.virt) 167 if (info->map.virt)
169 iounmap(info->map.virt); 168 iounmap(info->map.virt);
170 169
171 kfree(info->partitions);
172
173 if (info->res) { 170 if (info->res) {
174 release_resource(info->res); 171 release_resource(info->res);
175 kfree(info->res); 172 kfree(info->res);
@@ -185,8 +182,6 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
185{ 182{
186 struct flash_platform_data *plat = dev->dev.platform_data; 183 struct flash_platform_data *plat = dev->dev.platform_data;
187 struct ixp4xx_flash_info *info; 184 struct ixp4xx_flash_info *info;
188 const char *part_type = NULL;
189 int nr_parts = 0;
190 int err = -1; 185 int err = -1;
191 186
192 if (!plat) 187 if (!plat)
@@ -252,28 +247,12 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
252 /* Use the fast version */ 247 /* Use the fast version */
253 info->map.write = ixp4xx_write16; 248 info->map.write = ixp4xx_write16;
254 249
255 nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions, 250 err = mtd_device_parse_register(info->mtd, probes, dev->resource->start,
256 dev->resource->start); 251 plat->parts, plat->nr_parts);
257 if (nr_parts > 0) { 252 if (err) {
258 part_type = "dynamic";
259 } else {
260 info->partitions = plat->parts;
261 nr_parts = plat->nr_parts;
262 part_type = "static";
263 }
264 if (nr_parts == 0)
265 printk(KERN_NOTICE "IXP4xx flash: no partition info "
266 "available, registering whole flash\n");
267 else
268 printk(KERN_NOTICE "IXP4xx flash: using %s partition "
269 "definition\n", part_type);
270
271 err = mtd_device_register(info->mtd, info->partitions, nr_parts);
272 if (err)
273 printk(KERN_ERR "Could not parse partitions\n"); 253 printk(KERN_ERR "Could not parse partitions\n");
274
275 if (err)
276 goto Error; 254 goto Error;
255 }
277 256
278 return 0; 257 return 0;
279 258
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 7e508969239e..4f10e27ada55 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -107,16 +107,12 @@ ltq_copy_to(struct map_info *map, unsigned long to,
107 spin_unlock_irqrestore(&ebu_lock, flags); 107 spin_unlock_irqrestore(&ebu_lock, flags);
108} 108}
109 109
110static const char const *part_probe_types[] = { "cmdlinepart", NULL };
111
112static int __init 110static int __init
113ltq_mtd_probe(struct platform_device *pdev) 111ltq_mtd_probe(struct platform_device *pdev)
114{ 112{
115 struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev); 113 struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev);
116 struct ltq_mtd *ltq_mtd; 114 struct ltq_mtd *ltq_mtd;
117 struct mtd_partition *parts;
118 struct resource *res; 115 struct resource *res;
119 int nr_parts = 0;
120 struct cfi_private *cfi; 116 struct cfi_private *cfi;
121 int err; 117 int err;
122 118
@@ -172,17 +168,8 @@ ltq_mtd_probe(struct platform_device *pdev)
172 cfi->addr_unlock1 ^= 1; 168 cfi->addr_unlock1 ^= 1;
173 cfi->addr_unlock2 ^= 1; 169 cfi->addr_unlock2 ^= 1;
174 170
175 nr_parts = parse_mtd_partitions(ltq_mtd->mtd, 171 err = mtd_device_parse_register(ltq_mtd->mtd, NULL, 0,
176 part_probe_types, &parts, 0); 172 ltq_mtd_data->parts, ltq_mtd_data->nr_parts);
177 if (nr_parts > 0) {
178 dev_info(&pdev->dev,
179 "using %d partitions from cmdline", nr_parts);
180 } else {
181 nr_parts = ltq_mtd_data->nr_parts;
182 parts = ltq_mtd_data->parts;
183 }
184
185 err = mtd_device_register(ltq_mtd->mtd, parts, nr_parts);
186 if (err) { 173 if (err) {
187 dev_err(&pdev->dev, "failed to add partitions\n"); 174 dev_err(&pdev->dev, "failed to add partitions\n");
188 goto err_destroy; 175 goto err_destroy;
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 5936c466e901..119baa7d7477 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -33,9 +33,6 @@ struct latch_addr_flash_info {
33 /* cache; could be found out of res */ 33 /* cache; could be found out of res */
34 unsigned long win_mask; 34 unsigned long win_mask;
35 35
36 int nr_parts;
37 struct mtd_partition *parts;
38
39 spinlock_t lock; 36 spinlock_t lock;
40}; 37};
41 38
@@ -97,8 +94,6 @@ static void lf_copy_from(struct map_info *map, void *to,
97 94
98static char *rom_probe_types[] = { "cfi_probe", NULL }; 95static char *rom_probe_types[] = { "cfi_probe", NULL };
99 96
100static char *part_probe_types[] = { "cmdlinepart", NULL };
101
102static int latch_addr_flash_remove(struct platform_device *dev) 97static int latch_addr_flash_remove(struct platform_device *dev)
103{ 98{
104 struct latch_addr_flash_info *info; 99 struct latch_addr_flash_info *info;
@@ -112,8 +107,6 @@ static int latch_addr_flash_remove(struct platform_device *dev)
112 latch_addr_data = dev->dev.platform_data; 107 latch_addr_data = dev->dev.platform_data;
113 108
114 if (info->mtd != NULL) { 109 if (info->mtd != NULL) {
115 if (info->nr_parts)
116 kfree(info->parts);
117 mtd_device_unregister(info->mtd); 110 mtd_device_unregister(info->mtd);
118 map_destroy(info->mtd); 111 map_destroy(info->mtd);
119 } 112 }
@@ -206,21 +199,8 @@ static int __devinit latch_addr_flash_probe(struct platform_device *dev)
206 } 199 }
207 info->mtd->owner = THIS_MODULE; 200 info->mtd->owner = THIS_MODULE;
208 201
209 err = parse_mtd_partitions(info->mtd, (const char **)part_probe_types, 202 mtd_device_parse_register(info->mtd, NULL, 0,
210 &info->parts, 0); 203 latch_addr_data->parts, latch_addr_data->nr_parts);
211 if (err > 0) {
212 mtd_device_register(info->mtd, info->parts, err);
213 return 0;
214 }
215 if (latch_addr_data->nr_parts) {
216 pr_notice("Using latch-addr-flash partition information\n");
217 mtd_device_register(info->mtd,
218 latch_addr_data->parts,
219 latch_addr_data->nr_parts);
220 return 0;
221 }
222
223 mtd_device_register(info->mtd, NULL, 0);
224 return 0; 204 return 0;
225 205
226iounmap: 206iounmap:
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index bbe168b65c26..e8e9fec23553 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -22,22 +22,6 @@
22#include <linux/mtd/map.h> 22#include <linux/mtd/map.h>
23#include <linux/mtd/mtd.h> 23#include <linux/mtd/mtd.h>
24 24
25#ifdef CONFIG_MTD_DEBUG
26static int debug = CONFIG_MTD_DEBUG_VERBOSE;
27module_param(debug, int, 0);
28MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy");
29#undef DEBUG
30#define DEBUG(n, format, arg...) \
31 if (n <= debug) { \
32 printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __func__ , ## arg); \
33 }
34
35#else
36#undef DEBUG
37#define DEBUG(n, arg...)
38static const int debug = 0;
39#endif
40
41#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg) 25#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg)
42 26
43#define DRIVER_DESC "PCMCIA Flash memory card driver" 27#define DRIVER_DESC "PCMCIA Flash memory card driver"
@@ -105,13 +89,13 @@ static caddr_t remap_window(struct map_info *map, unsigned long to)
105 int ret; 89 int ret;
106 90
107 if (!pcmcia_dev_present(dev->p_dev)) { 91 if (!pcmcia_dev_present(dev->p_dev)) {
108 DEBUG(1, "device removed"); 92 pr_debug("device removed\n");
109 return 0; 93 return 0;
110 } 94 }
111 95
112 offset = to & ~(dev->win_size-1); 96 offset = to & ~(dev->win_size-1);
113 if (offset != dev->offset) { 97 if (offset != dev->offset) {
114 DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x", 98 pr_debug("Remapping window from 0x%8.8x to 0x%8.8x\n",
115 dev->offset, offset); 99 dev->offset, offset);
116 ret = pcmcia_map_mem_page(dev->p_dev, win, offset); 100 ret = pcmcia_map_mem_page(dev->p_dev, win, offset);
117 if (ret != 0) 101 if (ret != 0)
@@ -132,7 +116,7 @@ static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
132 return d; 116 return d;
133 117
134 d.x[0] = readb(addr); 118 d.x[0] = readb(addr);
135 DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", ofs, addr, d.x[0]); 119 pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n", ofs, addr, d.x[0]);
136 return d; 120 return d;
137} 121}
138 122
@@ -147,7 +131,7 @@ static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
147 return d; 131 return d;
148 132
149 d.x[0] = readw(addr); 133 d.x[0] = readw(addr);
150 DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", ofs, addr, d.x[0]); 134 pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n", ofs, addr, d.x[0]);
151 return d; 135 return d;
152} 136}
153 137
@@ -157,7 +141,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long
157 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; 141 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
158 unsigned long win_size = dev->win_size; 142 unsigned long win_size = dev->win_size;
159 143
160 DEBUG(3, "to = %p from = %lu len = %zd", to, from, len); 144 pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
161 while(len) { 145 while(len) {
162 int toread = win_size - (from & (win_size-1)); 146 int toread = win_size - (from & (win_size-1));
163 caddr_t addr; 147 caddr_t addr;
@@ -169,7 +153,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long
169 if(!addr) 153 if(!addr)
170 return; 154 return;
171 155
172 DEBUG(4, "memcpy from %p to %p len = %d", addr, to, toread); 156 pr_debug("memcpy from %p to %p len = %d\n", addr, to, toread);
173 memcpy_fromio(to, addr, toread); 157 memcpy_fromio(to, addr, toread);
174 len -= toread; 158 len -= toread;
175 to += toread; 159 to += toread;
@@ -185,7 +169,7 @@ static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long
185 if(!addr) 169 if(!addr)
186 return; 170 return;
187 171
188 DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", adr, addr, d.x[0]); 172 pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n", adr, addr, d.x[0]);
189 writeb(d.x[0], addr); 173 writeb(d.x[0], addr);
190} 174}
191 175
@@ -196,7 +180,7 @@ static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long
196 if(!addr) 180 if(!addr)
197 return; 181 return;
198 182
199 DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", adr, addr, d.x[0]); 183 pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n", adr, addr, d.x[0]);
200 writew(d.x[0], addr); 184 writew(d.x[0], addr);
201} 185}
202 186
@@ -206,7 +190,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v
206 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; 190 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
207 unsigned long win_size = dev->win_size; 191 unsigned long win_size = dev->win_size;
208 192
209 DEBUG(3, "to = %lu from = %p len = %zd", to, from, len); 193 pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
210 while(len) { 194 while(len) {
211 int towrite = win_size - (to & (win_size-1)); 195 int towrite = win_size - (to & (win_size-1));
212 caddr_t addr; 196 caddr_t addr;
@@ -218,7 +202,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v
218 if(!addr) 202 if(!addr)
219 return; 203 return;
220 204
221 DEBUG(4, "memcpy from %p to %p len = %d", from, addr, towrite); 205 pr_debug("memcpy from %p to %p len = %d\n", from, addr, towrite);
222 memcpy_toio(addr, from, towrite); 206 memcpy_toio(addr, from, towrite);
223 len -= towrite; 207 len -= towrite;
224 to += towrite; 208 to += towrite;
@@ -240,7 +224,7 @@ static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
240 return d; 224 return d;
241 225
242 d.x[0] = readb(win_base + ofs); 226 d.x[0] = readb(win_base + ofs);
243 DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", 227 pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n",
244 ofs, win_base + ofs, d.x[0]); 228 ofs, win_base + ofs, d.x[0]);
245 return d; 229 return d;
246} 230}
@@ -255,7 +239,7 @@ static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
255 return d; 239 return d;
256 240
257 d.x[0] = readw(win_base + ofs); 241 d.x[0] = readw(win_base + ofs);
258 DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", 242 pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n",
259 ofs, win_base + ofs, d.x[0]); 243 ofs, win_base + ofs, d.x[0]);
260 return d; 244 return d;
261} 245}
@@ -268,7 +252,7 @@ static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from,
268 if(DEV_REMOVED(map)) 252 if(DEV_REMOVED(map))
269 return; 253 return;
270 254
271 DEBUG(3, "to = %p from = %lu len = %zd", to, from, len); 255 pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
272 memcpy_fromio(to, win_base + from, len); 256 memcpy_fromio(to, win_base + from, len);
273} 257}
274 258
@@ -280,7 +264,7 @@ static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
280 if(DEV_REMOVED(map)) 264 if(DEV_REMOVED(map))
281 return; 265 return;
282 266
283 DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", 267 pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n",
284 adr, win_base + adr, d.x[0]); 268 adr, win_base + adr, d.x[0]);
285 writeb(d.x[0], win_base + adr); 269 writeb(d.x[0], win_base + adr);
286} 270}
@@ -293,7 +277,7 @@ static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr)
293 if(DEV_REMOVED(map)) 277 if(DEV_REMOVED(map))
294 return; 278 return;
295 279
296 DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", 280 pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n",
297 adr, win_base + adr, d.x[0]); 281 adr, win_base + adr, d.x[0]);
298 writew(d.x[0], win_base + adr); 282 writew(d.x[0], win_base + adr);
299} 283}
@@ -306,7 +290,7 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
306 if(DEV_REMOVED(map)) 290 if(DEV_REMOVED(map))
307 return; 291 return;
308 292
309 DEBUG(3, "to = %lu from = %p len = %zd", to, from, len); 293 pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
310 memcpy_toio(win_base + to, from, len); 294 memcpy_toio(win_base + to, from, len);
311} 295}
312 296
@@ -316,7 +300,7 @@ static void pcmciamtd_set_vpp(struct map_info *map, int on)
316 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; 300 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
317 struct pcmcia_device *link = dev->p_dev; 301 struct pcmcia_device *link = dev->p_dev;
318 302
319 DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp); 303 pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
320 pcmcia_fixup_vpp(link, on ? dev->vpp : 0); 304 pcmcia_fixup_vpp(link, on ? dev->vpp : 0);
321} 305}
322 306
@@ -325,7 +309,7 @@ static void pcmciamtd_release(struct pcmcia_device *link)
325{ 309{
326 struct pcmciamtd_dev *dev = link->priv; 310 struct pcmciamtd_dev *dev = link->priv;
327 311
328 DEBUG(3, "link = 0x%p", link); 312 pr_debug("link = 0x%p\n", link);
329 313
330 if (link->resource[2]->end) { 314 if (link->resource[2]->end) {
331 if(dev->win_base) { 315 if(dev->win_base) {
@@ -337,7 +321,6 @@ static void pcmciamtd_release(struct pcmcia_device *link)
337} 321}
338 322
339 323
340#ifdef CONFIG_MTD_DEBUG
341static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev, 324static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev,
342 tuple_t *tuple, 325 tuple_t *tuple,
343 void *priv_data) 326 void *priv_data)
@@ -347,7 +330,7 @@ static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev,
347 if (!pcmcia_parse_tuple(tuple, &parse)) { 330 if (!pcmcia_parse_tuple(tuple, &parse)) {
348 cistpl_format_t *t = &parse.format; 331 cistpl_format_t *t = &parse.format;
349 (void)t; /* Shut up, gcc */ 332 (void)t; /* Shut up, gcc */
350 DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u", 333 pr_debug("Format type: %u, Error Detection: %u, offset = %u, length =%u\n",
351 t->type, t->edc, t->offset, t->length); 334 t->type, t->edc, t->offset, t->length);
352 } 335 }
353 return -ENOSPC; 336 return -ENOSPC;
@@ -363,12 +346,11 @@ static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev,
363 if (!pcmcia_parse_tuple(tuple, &parse)) { 346 if (!pcmcia_parse_tuple(tuple, &parse)) {
364 cistpl_jedec_t *t = &parse.jedec; 347 cistpl_jedec_t *t = &parse.jedec;
365 for (i = 0; i < t->nid; i++) 348 for (i = 0; i < t->nid; i++)
366 DEBUG(2, "JEDEC: 0x%02x 0x%02x", 349 pr_debug("JEDEC: 0x%02x 0x%02x\n",
367 t->id[i].mfr, t->id[i].info); 350 t->id[i].mfr, t->id[i].info);
368 } 351 }
369 return -ENOSPC; 352 return -ENOSPC;
370} 353}
371#endif
372 354
373static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev, 355static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev,
374 tuple_t *tuple, 356 tuple_t *tuple,
@@ -382,14 +364,14 @@ static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev,
382 if (pcmcia_parse_tuple(tuple, &parse)) 364 if (pcmcia_parse_tuple(tuple, &parse))
383 return -EINVAL; 365 return -EINVAL;
384 366
385 DEBUG(2, "Common memory:"); 367 pr_debug("Common memory:\n");
386 dev->pcmcia_map.size = t->dev[0].size; 368 dev->pcmcia_map.size = t->dev[0].size;
387 /* from here on: DEBUG only */ 369 /* from here on: DEBUG only */
388 for (i = 0; i < t->ndev; i++) { 370 for (i = 0; i < t->ndev; i++) {
389 DEBUG(2, "Region %d, type = %u", i, t->dev[i].type); 371 pr_debug("Region %d, type = %u\n", i, t->dev[i].type);
390 DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp); 372 pr_debug("Region %d, wp = %u\n", i, t->dev[i].wp);
391 DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed); 373 pr_debug("Region %d, speed = %u ns\n", i, t->dev[i].speed);
392 DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size); 374 pr_debug("Region %d, size = %u bytes\n", i, t->dev[i].size);
393 } 375 }
394 return 0; 376 return 0;
395} 377}
@@ -409,12 +391,12 @@ static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev,
409 dev->pcmcia_map.bankwidth = t->geo[0].buswidth; 391 dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
410 /* from here on: DEBUG only */ 392 /* from here on: DEBUG only */
411 for (i = 0; i < t->ngeo; i++) { 393 for (i = 0; i < t->ngeo; i++) {
412 DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth); 394 pr_debug("region: %d bankwidth = %u\n", i, t->geo[i].buswidth);
413 DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block); 395 pr_debug("region: %d erase_block = %u\n", i, t->geo[i].erase_block);
414 DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block); 396 pr_debug("region: %d read_block = %u\n", i, t->geo[i].read_block);
415 DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block); 397 pr_debug("region: %d write_block = %u\n", i, t->geo[i].write_block);
416 DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition); 398 pr_debug("region: %d partition = %u\n", i, t->geo[i].partition);
417 DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave); 399 pr_debug("region: %d interleave = %u\n", i, t->geo[i].interleave);
418 } 400 }
419 return 0; 401 return 0;
420} 402}
@@ -432,13 +414,11 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev
432 if (p_dev->prod_id[i]) 414 if (p_dev->prod_id[i])
433 strcat(dev->mtd_name, p_dev->prod_id[i]); 415 strcat(dev->mtd_name, p_dev->prod_id[i]);
434 } 416 }
435 DEBUG(2, "Found name: %s", dev->mtd_name); 417 pr_debug("Found name: %s\n", dev->mtd_name);
436 } 418 }
437 419
438#ifdef CONFIG_MTD_DEBUG
439 pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL); 420 pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL);
440 pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL); 421 pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL);
441#endif
442 pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev); 422 pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev);
443 pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev); 423 pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev);
444 424
@@ -450,12 +430,12 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev
450 430
451 if(force_size) { 431 if(force_size) {
452 dev->pcmcia_map.size = force_size << 20; 432 dev->pcmcia_map.size = force_size << 20;
453 DEBUG(2, "size forced to %dM", force_size); 433 pr_debug("size forced to %dM\n", force_size);
454 } 434 }
455 435
456 if(bankwidth) { 436 if(bankwidth) {
457 dev->pcmcia_map.bankwidth = bankwidth; 437 dev->pcmcia_map.bankwidth = bankwidth;
458 DEBUG(2, "bankwidth forced to %d", bankwidth); 438 pr_debug("bankwidth forced to %d\n", bankwidth);
459 } 439 }
460 440
461 dev->pcmcia_map.name = dev->mtd_name; 441 dev->pcmcia_map.name = dev->mtd_name;
@@ -464,7 +444,7 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev
464 *new_name = 1; 444 *new_name = 1;
465 } 445 }
466 446
467 DEBUG(1, "Device: Size: %lu Width:%d Name: %s", 447 pr_debug("Device: Size: %lu Width:%d Name: %s\n",
468 dev->pcmcia_map.size, 448 dev->pcmcia_map.size,
469 dev->pcmcia_map.bankwidth << 3, dev->mtd_name); 449 dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
470} 450}
@@ -479,7 +459,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
479 static char *probes[] = { "jedec_probe", "cfi_probe" }; 459 static char *probes[] = { "jedec_probe", "cfi_probe" };
480 int new_name = 0; 460 int new_name = 0;
481 461
482 DEBUG(3, "link=0x%p", link); 462 pr_debug("link=0x%p\n", link);
483 463
484 card_settings(dev, link, &new_name); 464 card_settings(dev, link, &new_name);
485 465
@@ -512,11 +492,11 @@ static int pcmciamtd_config(struct pcmcia_device *link)
512 492
513 do { 493 do {
514 int ret; 494 int ret;
515 DEBUG(2, "requesting window with size = %luKiB memspeed = %d", 495 pr_debug("requesting window with size = %luKiB memspeed = %d\n",
516 (unsigned long) resource_size(link->resource[2]) >> 10, 496 (unsigned long) resource_size(link->resource[2]) >> 10,
517 mem_speed); 497 mem_speed);
518 ret = pcmcia_request_window(link, link->resource[2], mem_speed); 498 ret = pcmcia_request_window(link, link->resource[2], mem_speed);
519 DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size); 499 pr_debug("ret = %d dev->win_size = %d\n", ret, dev->win_size);
520 if(ret) { 500 if(ret) {
521 j++; 501 j++;
522 link->resource[2]->start = 0; 502 link->resource[2]->start = 0;
@@ -524,21 +504,21 @@ static int pcmciamtd_config(struct pcmcia_device *link)
524 force_size << 20 : MAX_PCMCIA_ADDR; 504 force_size << 20 : MAX_PCMCIA_ADDR;
525 link->resource[2]->end >>= j; 505 link->resource[2]->end >>= j;
526 } else { 506 } else {
527 DEBUG(2, "Got window of size %luKiB", (unsigned long) 507 pr_debug("Got window of size %luKiB\n", (unsigned long)
528 resource_size(link->resource[2]) >> 10); 508 resource_size(link->resource[2]) >> 10);
529 dev->win_size = resource_size(link->resource[2]); 509 dev->win_size = resource_size(link->resource[2]);
530 break; 510 break;
531 } 511 }
532 } while (link->resource[2]->end >= 0x1000); 512 } while (link->resource[2]->end >= 0x1000);
533 513
534 DEBUG(2, "dev->win_size = %d", dev->win_size); 514 pr_debug("dev->win_size = %d\n", dev->win_size);
535 515
536 if(!dev->win_size) { 516 if(!dev->win_size) {
537 dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n"); 517 dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n");
538 pcmciamtd_release(link); 518 pcmciamtd_release(link);
539 return -ENODEV; 519 return -ENODEV;
540 } 520 }
541 DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10); 521 pr_debug("Allocated a window of %dKiB\n", dev->win_size >> 10);
542 522
543 /* Get write protect status */ 523 /* Get write protect status */
544 dev->win_base = ioremap(link->resource[2]->start, 524 dev->win_base = ioremap(link->resource[2]->start,
@@ -549,7 +529,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
549 pcmciamtd_release(link); 529 pcmciamtd_release(link);
550 return -ENODEV; 530 return -ENODEV;
551 } 531 }
552 DEBUG(1, "mapped window dev = %p @ %pR, base = %p", 532 pr_debug("mapped window dev = %p @ %pR, base = %p\n",
553 dev, link->resource[2], dev->win_base); 533 dev, link->resource[2], dev->win_base);
554 534
555 dev->offset = 0; 535 dev->offset = 0;
@@ -564,7 +544,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
564 } 544 }
565 545
566 link->config_index = 0; 546 link->config_index = 0;
567 DEBUG(2, "Setting Configuration"); 547 pr_debug("Setting Configuration\n");
568 ret = pcmcia_enable_device(link); 548 ret = pcmcia_enable_device(link);
569 if (ret != 0) { 549 if (ret != 0) {
570 if (dev->win_base) { 550 if (dev->win_base) {
@@ -580,17 +560,17 @@ static int pcmciamtd_config(struct pcmcia_device *link)
580 mtd = do_map_probe("map_rom", &dev->pcmcia_map); 560 mtd = do_map_probe("map_rom", &dev->pcmcia_map);
581 } else { 561 } else {
582 for(i = 0; i < ARRAY_SIZE(probes); i++) { 562 for(i = 0; i < ARRAY_SIZE(probes); i++) {
583 DEBUG(1, "Trying %s", probes[i]); 563 pr_debug("Trying %s\n", probes[i]);
584 mtd = do_map_probe(probes[i], &dev->pcmcia_map); 564 mtd = do_map_probe(probes[i], &dev->pcmcia_map);
585 if(mtd) 565 if(mtd)
586 break; 566 break;
587 567
588 DEBUG(1, "FAILED: %s", probes[i]); 568 pr_debug("FAILED: %s\n", probes[i]);
589 } 569 }
590 } 570 }
591 571
592 if(!mtd) { 572 if(!mtd) {
593 DEBUG(1, "Can not find an MTD"); 573 pr_debug("Can not find an MTD\n");
594 pcmciamtd_release(link); 574 pcmciamtd_release(link);
595 return -ENODEV; 575 return -ENODEV;
596 } 576 }
@@ -617,7 +597,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
617 /* If the memory found is fits completely into the mapped PCMCIA window, 597 /* If the memory found is fits completely into the mapped PCMCIA window,
618 use the faster non-remapping read/write functions */ 598 use the faster non-remapping read/write functions */
619 if(mtd->size <= dev->win_size) { 599 if(mtd->size <= dev->win_size) {
620 DEBUG(1, "Using non remapping memory functions"); 600 pr_debug("Using non remapping memory functions\n");
621 dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base; 601 dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base;
622 if (dev->pcmcia_map.bankwidth == 1) { 602 if (dev->pcmcia_map.bankwidth == 1) {
623 dev->pcmcia_map.read = pcmcia_read8; 603 dev->pcmcia_map.read = pcmcia_read8;
@@ -645,7 +625,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
645 625
646static int pcmciamtd_suspend(struct pcmcia_device *dev) 626static int pcmciamtd_suspend(struct pcmcia_device *dev)
647{ 627{
648 DEBUG(2, "EVENT_PM_RESUME"); 628 pr_debug("EVENT_PM_RESUME\n");
649 629
650 /* get_lock(link); */ 630 /* get_lock(link); */
651 631
@@ -654,7 +634,7 @@ static int pcmciamtd_suspend(struct pcmcia_device *dev)
654 634
655static int pcmciamtd_resume(struct pcmcia_device *dev) 635static int pcmciamtd_resume(struct pcmcia_device *dev)
656{ 636{
657 DEBUG(2, "EVENT_PM_SUSPEND"); 637 pr_debug("EVENT_PM_SUSPEND\n");
658 638
659 /* free_lock(link); */ 639 /* free_lock(link); */
660 640
@@ -666,7 +646,7 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
666{ 646{
667 struct pcmciamtd_dev *dev = link->priv; 647 struct pcmciamtd_dev *dev = link->priv;
668 648
669 DEBUG(3, "link=0x%p", link); 649 pr_debug("link=0x%p\n", link);
670 650
671 if(dev->mtd_info) { 651 if(dev->mtd_info) {
672 mtd_device_unregister(dev->mtd_info); 652 mtd_device_unregister(dev->mtd_info);
@@ -686,7 +666,7 @@ static int pcmciamtd_probe(struct pcmcia_device *link)
686 /* Create new memory card device */ 666 /* Create new memory card device */
687 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 667 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
688 if (!dev) return -ENOMEM; 668 if (!dev) return -ENOMEM;
689 DEBUG(1, "dev=0x%p", dev); 669 pr_debug("dev=0x%p\n", dev);
690 670
691 dev->p_dev = link; 671 dev->p_dev = link;
692 link->priv = dev; 672 link->priv = dev;
@@ -755,7 +735,7 @@ static int __init init_pcmciamtd(void)
755 735
756static void __exit exit_pcmciamtd(void) 736static void __exit exit_pcmciamtd(void)
757{ 737{
758 DEBUG(1, DRIVER_DESC " unloading"); 738 pr_debug(DRIVER_DESC " unloading");
759 pcmcia_unregister_driver(&pcmciamtd_driver); 739 pcmcia_unregister_driver(&pcmciamtd_driver);
760} 740}
761 741
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index f64cee4a3bfb..66e8200079c2 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -27,8 +27,6 @@ struct physmap_flash_info {
27 struct mtd_info *mtd[MAX_RESOURCES]; 27 struct mtd_info *mtd[MAX_RESOURCES];
28 struct mtd_info *cmtd; 28 struct mtd_info *cmtd;
29 struct map_info map[MAX_RESOURCES]; 29 struct map_info map[MAX_RESOURCES];
30 int nr_parts;
31 struct mtd_partition *parts;
32}; 30};
33 31
34static int physmap_flash_remove(struct platform_device *dev) 32static int physmap_flash_remove(struct platform_device *dev)
@@ -46,8 +44,6 @@ static int physmap_flash_remove(struct platform_device *dev)
46 44
47 if (info->cmtd) { 45 if (info->cmtd) {
48 mtd_device_unregister(info->cmtd); 46 mtd_device_unregister(info->cmtd);
49 if (info->nr_parts)
50 kfree(info->parts);
51 if (info->cmtd != info->mtd[0]) 47 if (info->cmtd != info->mtd[0])
52 mtd_concat_destroy(info->cmtd); 48 mtd_concat_destroy(info->cmtd);
53 } 49 }
@@ -175,23 +171,8 @@ static int physmap_flash_probe(struct platform_device *dev)
175 if (err) 171 if (err)
176 goto err_out; 172 goto err_out;
177 173
178 err = parse_mtd_partitions(info->cmtd, part_probe_types, 174 mtd_device_parse_register(info->cmtd, part_probe_types, 0,
179 &info->parts, 0); 175 physmap_data->parts, physmap_data->nr_parts);
180 if (err > 0) {
181 mtd_device_register(info->cmtd, info->parts, err);
182 info->nr_parts = err;
183 return 0;
184 }
185
186 if (physmap_data->nr_parts) {
187 printk(KERN_NOTICE "Using physmap partition information\n");
188 mtd_device_register(info->cmtd, physmap_data->parts,
189 physmap_data->nr_parts);
190 return 0;
191 }
192
193 mtd_device_register(info->cmtd, NULL, 0);
194
195 return 0; 176 return 0;
196 177
197err_out: 178err_out:
@@ -245,21 +226,6 @@ static struct platform_device physmap_flash = {
245 .num_resources = 1, 226 .num_resources = 1,
246 .resource = &physmap_flash_resource, 227 .resource = &physmap_flash_resource,
247}; 228};
248
249void physmap_configure(unsigned long addr, unsigned long size,
250 int bankwidth, void (*set_vpp)(struct map_info *, int))
251{
252 physmap_flash_resource.start = addr;
253 physmap_flash_resource.end = addr + size - 1;
254 physmap_flash_data.width = bankwidth;
255 physmap_flash_data.set_vpp = set_vpp;
256}
257
258void physmap_set_partitions(struct mtd_partition *parts, int num_parts)
259{
260 physmap_flash_data.nr_parts = num_parts;
261 physmap_flash_data.parts = parts;
262}
263#endif 229#endif
264 230
265static int __init physmap_init(void) 231static int __init physmap_init(void)
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index d251d1db129b..7d65f9d3e690 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -34,58 +34,10 @@ struct of_flash_list {
34 34
35struct of_flash { 35struct of_flash {
36 struct mtd_info *cmtd; 36 struct mtd_info *cmtd;
37 struct mtd_partition *parts;
38 int list_size; /* number of elements in of_flash_list */ 37 int list_size; /* number of elements in of_flash_list */
39 struct of_flash_list list[0]; 38 struct of_flash_list list[0];
40}; 39};
41 40
42#define OF_FLASH_PARTS(info) ((info)->parts)
43static int parse_obsolete_partitions(struct platform_device *dev,
44 struct of_flash *info,
45 struct device_node *dp)
46{
47 int i, plen, nr_parts;
48 const struct {
49 __be32 offset, len;
50 } *part;
51 const char *names;
52
53 part = of_get_property(dp, "partitions", &plen);
54 if (!part)
55 return 0; /* No partitions found */
56
57 dev_warn(&dev->dev, "Device tree uses obsolete partition map binding\n");
58
59 nr_parts = plen / sizeof(part[0]);
60
61 info->parts = kzalloc(nr_parts * sizeof(*info->parts), GFP_KERNEL);
62 if (!info->parts)
63 return -ENOMEM;
64
65 names = of_get_property(dp, "partition-names", &plen);
66
67 for (i = 0; i < nr_parts; i++) {
68 info->parts[i].offset = be32_to_cpu(part->offset);
69 info->parts[i].size = be32_to_cpu(part->len) & ~1;
70 if (be32_to_cpu(part->len) & 1) /* bit 0 set signifies read only partition */
71 info->parts[i].mask_flags = MTD_WRITEABLE;
72
73 if (names && (plen > 0)) {
74 int len = strlen(names) + 1;
75
76 info->parts[i].name = (char *)names;
77 plen -= len;
78 names += len;
79 } else {
80 info->parts[i].name = "unnamed";
81 }
82
83 part++;
84 }
85
86 return nr_parts;
87}
88
89static int of_flash_remove(struct platform_device *dev) 41static int of_flash_remove(struct platform_device *dev)
90{ 42{
91 struct of_flash *info; 43 struct of_flash *info;
@@ -101,11 +53,8 @@ static int of_flash_remove(struct platform_device *dev)
101 mtd_concat_destroy(info->cmtd); 53 mtd_concat_destroy(info->cmtd);
102 } 54 }
103 55
104 if (info->cmtd) { 56 if (info->cmtd)
105 if (OF_FLASH_PARTS(info))
106 kfree(OF_FLASH_PARTS(info));
107 mtd_device_unregister(info->cmtd); 57 mtd_device_unregister(info->cmtd);
108 }
109 58
110 for (i = 0; i < info->list_size; i++) { 59 for (i = 0; i < info->list_size; i++) {
111 if (info->list[i].mtd) 60 if (info->list[i].mtd)
@@ -165,7 +114,8 @@ static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
165 specifies the list of partition probers to use. If none is given then the 114 specifies the list of partition probers to use. If none is given then the
166 default is use. These take precedence over other device tree 115 default is use. These take precedence over other device tree
167 information. */ 116 information. */
168static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", NULL }; 117static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot",
118 "ofpart", "ofoldpart", NULL };
169static const char ** __devinit of_get_probes(struct device_node *dp) 119static const char ** __devinit of_get_probes(struct device_node *dp)
170{ 120{
171 const char *cp; 121 const char *cp;
@@ -218,6 +168,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
218 int reg_tuple_size; 168 int reg_tuple_size;
219 struct mtd_info **mtd_list = NULL; 169 struct mtd_info **mtd_list = NULL;
220 resource_size_t res_size; 170 resource_size_t res_size;
171 struct mtd_part_parser_data ppdata;
221 172
222 match = of_match_device(of_flash_match, &dev->dev); 173 match = of_match_device(of_flash_match, &dev->dev);
223 if (!match) 174 if (!match)
@@ -331,29 +282,12 @@ static int __devinit of_flash_probe(struct platform_device *dev)
331 if (err) 282 if (err)
332 goto err_out; 283 goto err_out;
333 284
285 ppdata.of_node = dp;
334 part_probe_types = of_get_probes(dp); 286 part_probe_types = of_get_probes(dp);
335 err = parse_mtd_partitions(info->cmtd, part_probe_types, 287 mtd_device_parse_register(info->cmtd, part_probe_types, &ppdata,
336 &info->parts, 0); 288 NULL, 0);
337 if (err < 0) {
338 of_free_probes(part_probe_types);
339 goto err_out;
340 }
341 of_free_probes(part_probe_types); 289 of_free_probes(part_probe_types);
342 290
343 if (err == 0) {
344 err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts);
345 if (err < 0)
346 goto err_out;
347 }
348
349 if (err == 0) {
350 err = parse_obsolete_partitions(dev, info, dp);
351 if (err < 0)
352 goto err_out;
353 }
354
355 mtd_device_register(info->cmtd, info->parts, err);
356
357 kfree(mtd_list); 291 kfree(mtd_list);
358 292
359 return 0; 293 return 0;
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 9ca1eccba4bc..94f553489725 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -44,8 +44,6 @@ struct platram_info {
44 struct device *dev; 44 struct device *dev;
45 struct mtd_info *mtd; 45 struct mtd_info *mtd;
46 struct map_info map; 46 struct map_info map;
47 struct mtd_partition *partitions;
48 bool free_partitions;
49 struct resource *area; 47 struct resource *area;
50 struct platdata_mtd_ram *pdata; 48 struct platdata_mtd_ram *pdata;
51}; 49};
@@ -95,10 +93,6 @@ static int platram_remove(struct platform_device *pdev)
95 93
96 if (info->mtd) { 94 if (info->mtd) {
97 mtd_device_unregister(info->mtd); 95 mtd_device_unregister(info->mtd);
98 if (info->partitions) {
99 if (info->free_partitions)
100 kfree(info->partitions);
101 }
102 map_destroy(info->mtd); 96 map_destroy(info->mtd);
103 } 97 }
104 98
@@ -228,21 +222,8 @@ static int platram_probe(struct platform_device *pdev)
228 /* check to see if there are any available partitions, or wether 222 /* check to see if there are any available partitions, or wether
229 * to add this device whole */ 223 * to add this device whole */
230 224
231 if (!pdata->nr_partitions) { 225 err = mtd_device_parse_register(info->mtd, pdata->probes, 0,
232 /* try to probe using the supplied probe type */ 226 pdata->partitions, pdata->nr_partitions);
233 if (pdata->probes) {
234 err = parse_mtd_partitions(info->mtd, pdata->probes,
235 &info->partitions, 0);
236 info->free_partitions = 1;
237 if (err > 0)
238 err = mtd_device_register(info->mtd,
239 info->partitions, err);
240 }
241 }
242 /* use the static mapping */
243 else
244 err = mtd_device_register(info->mtd, pdata->partitions,
245 pdata->nr_partitions);
246 if (!err) 227 if (!err)
247 dev_info(&pdev->dev, "registered mtd device\n"); 228 dev_info(&pdev->dev, "registered mtd device\n");
248 229
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 7ae137d4b998..411a17df9fc1 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -41,8 +41,6 @@ static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
41} 41}
42 42
43struct pxa2xx_flash_info { 43struct pxa2xx_flash_info {
44 struct mtd_partition *parts;
45 int nr_parts;
46 struct mtd_info *mtd; 44 struct mtd_info *mtd;
47 struct map_info map; 45 struct map_info map;
48}; 46};
@@ -55,9 +53,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
55{ 53{
56 struct flash_platform_data *flash = pdev->dev.platform_data; 54 struct flash_platform_data *flash = pdev->dev.platform_data;
57 struct pxa2xx_flash_info *info; 55 struct pxa2xx_flash_info *info;
58 struct mtd_partition *parts;
59 struct resource *res; 56 struct resource *res;
60 int ret = 0;
61 57
62 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 58 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
63 if (!res) 59 if (!res)
@@ -71,8 +67,6 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
71 info->map.bankwidth = flash->width; 67 info->map.bankwidth = flash->width;
72 info->map.phys = res->start; 68 info->map.phys = res->start;
73 info->map.size = resource_size(res); 69 info->map.size = resource_size(res);
74 info->parts = flash->parts;
75 info->nr_parts = flash->nr_parts;
76 70
77 info->map.virt = ioremap(info->map.phys, info->map.size); 71 info->map.virt = ioremap(info->map.phys, info->map.size);
78 if (!info->map.virt) { 72 if (!info->map.virt) {
@@ -104,18 +98,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
104 } 98 }
105 info->mtd->owner = THIS_MODULE; 99 info->mtd->owner = THIS_MODULE;
106 100
107 ret = parse_mtd_partitions(info->mtd, probes, &parts, 0); 101 mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
108
109 if (ret > 0) {
110 info->nr_parts = ret;
111 info->parts = parts;
112 }
113
114 if (!info->nr_parts)
115 printk("Registering %s as whole device\n",
116 info->map.name);
117
118 mtd_device_register(info->mtd, info->parts, info->nr_parts);
119 102
120 platform_set_drvdata(pdev, info); 103 platform_set_drvdata(pdev, info);
121 return 0; 104 return 0;
@@ -133,7 +116,6 @@ static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
133 iounmap(info->map.virt); 116 iounmap(info->map.virt);
134 if (info->map.cached) 117 if (info->map.cached)
135 iounmap(info->map.cached); 118 iounmap(info->map.cached);
136 kfree(info->parts);
137 kfree(info); 119 kfree(info);
138 return 0; 120 return 0;
139} 121}
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 761fb459d2c7..0237f197fd12 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -25,8 +25,6 @@
25struct rbtx4939_flash_info { 25struct rbtx4939_flash_info {
26 struct mtd_info *mtd; 26 struct mtd_info *mtd;
27 struct map_info map; 27 struct map_info map;
28 int nr_parts;
29 struct mtd_partition *parts;
30}; 28};
31 29
32static int rbtx4939_flash_remove(struct platform_device *dev) 30static int rbtx4939_flash_remove(struct platform_device *dev)
@@ -41,8 +39,6 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
41 if (info->mtd) { 39 if (info->mtd) {
42 struct rbtx4939_flash_data *pdata = dev->dev.platform_data; 40 struct rbtx4939_flash_data *pdata = dev->dev.platform_data;
43 41
44 if (info->nr_parts)
45 kfree(info->parts);
46 mtd_device_unregister(info->mtd); 42 mtd_device_unregister(info->mtd);
47 map_destroy(info->mtd); 43 map_destroy(info->mtd);
48 } 44 }
@@ -50,7 +46,6 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
50} 46}
51 47
52static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 48static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
53static const char *part_probe_types[] = { "cmdlinepart", NULL };
54 49
55static int rbtx4939_flash_probe(struct platform_device *dev) 50static int rbtx4939_flash_probe(struct platform_device *dev)
56{ 51{
@@ -107,22 +102,11 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
107 info->mtd->owner = THIS_MODULE; 102 info->mtd->owner = THIS_MODULE;
108 if (err) 103 if (err)
109 goto err_out; 104 goto err_out;
105 err = mtd_device_parse_register(info->mtd, NULL, 0,
106 pdata->parts, pdata->nr_parts);
110 107
111 err = parse_mtd_partitions(info->mtd, part_probe_types, 108 if (err)
112 &info->parts, 0); 109 goto err_out;
113 if (err > 0) {
114 mtd_device_register(info->mtd, info->parts, err);
115 info->nr_parts = err;
116 return 0;
117 }
118
119 if (pdata->nr_parts) {
120 pr_notice("Using rbtx4939 partition information\n");
121 mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
122 return 0;
123 }
124
125 mtd_device_register(info->mtd, NULL, 0);
126 return 0; 110 return 0;
127 111
128err_out: 112err_out:
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index a9b5e0e5c4c5..fa9c0a9670cd 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -131,10 +131,8 @@ struct sa_subdev_info {
131}; 131};
132 132
133struct sa_info { 133struct sa_info {
134 struct mtd_partition *parts;
135 struct mtd_info *mtd; 134 struct mtd_info *mtd;
136 int num_subdev; 135 int num_subdev;
137 unsigned int nr_parts;
138 struct sa_subdev_info subdev[0]; 136 struct sa_subdev_info subdev[0];
139}; 137};
140 138
@@ -231,8 +229,6 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
231 mtd_concat_destroy(info->mtd); 229 mtd_concat_destroy(info->mtd);
232 } 230 }
233 231
234 kfree(info->parts);
235
236 for (i = info->num_subdev - 1; i >= 0; i--) 232 for (i = info->num_subdev - 1; i >= 0; i--)
237 sa1100_destroy_subdev(&info->subdev[i]); 233 sa1100_destroy_subdev(&info->subdev[i]);
238 kfree(info); 234 kfree(info);
@@ -341,10 +337,8 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
341static int __devinit sa1100_mtd_probe(struct platform_device *pdev) 337static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
342{ 338{
343 struct flash_platform_data *plat = pdev->dev.platform_data; 339 struct flash_platform_data *plat = pdev->dev.platform_data;
344 struct mtd_partition *parts;
345 const char *part_type = NULL;
346 struct sa_info *info; 340 struct sa_info *info;
347 int err, nr_parts = 0; 341 int err;
348 342
349 if (!plat) 343 if (!plat)
350 return -ENODEV; 344 return -ENODEV;
@@ -358,26 +352,8 @@ static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
358 /* 352 /*
359 * Partition selection stuff. 353 * Partition selection stuff.
360 */ 354 */
361 nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0); 355 mtd_device_parse_register(info->mtd, part_probes, 0,
362 if (nr_parts > 0) { 356 plat->parts, plat->nr_parts);
363 info->parts = parts;
364 part_type = "dynamic";
365 } else {
366 parts = plat->parts;
367 nr_parts = plat->nr_parts;
368 part_type = "static";
369 }
370
371 if (nr_parts == 0)
372 printk(KERN_NOTICE "SA1100 flash: no partition info "
373 "available, registering whole flash\n");
374 else
375 printk(KERN_NOTICE "SA1100 flash: using %s partition "
376 "definition\n", part_type);
377
378 mtd_device_register(info->mtd, parts, nr_parts);
379
380 info->nr_parts = nr_parts;
381 357
382 platform_set_drvdata(pdev, info); 358 platform_set_drvdata(pdev, info);
383 err = 0; 359 err = 0;
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index cbf6bade9354..496c40704aff 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -19,8 +19,6 @@
19static struct mtd_info *flash_mtd; 19static struct mtd_info *flash_mtd;
20static struct mtd_info *eprom_mtd; 20static struct mtd_info *eprom_mtd;
21 21
22static struct mtd_partition *parsed_parts;
23
24struct map_info soleng_eprom_map = { 22struct map_info soleng_eprom_map = {
25 .name = "Solution Engine EPROM", 23 .name = "Solution Engine EPROM",
26 .size = 0x400000, 24 .size = 0x400000,
@@ -51,12 +49,14 @@ static struct mtd_partition superh_se_partitions[] = {
51 .size = MTDPART_SIZ_FULL, 49 .size = MTDPART_SIZ_FULL,
52 } 50 }
53}; 51};
52#define NUM_PARTITIONS ARRAY_SIZE(superh_se_partitions)
53#else
54#define superh_se_partitions NULL
55#define NUM_PARTITIONS 0
54#endif /* CONFIG_MTD_SUPERH_RESERVE */ 56#endif /* CONFIG_MTD_SUPERH_RESERVE */
55 57
56static int __init init_soleng_maps(void) 58static int __init init_soleng_maps(void)
57{ 59{
58 int nr_parts = 0;
59
60 /* First probe at offset 0 */ 60 /* First probe at offset 0 */
61 soleng_flash_map.phys = 0; 61 soleng_flash_map.phys = 0;
62 soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0); 62 soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0);
@@ -92,21 +92,8 @@ static int __init init_soleng_maps(void)
92 mtd_device_register(eprom_mtd, NULL, 0); 92 mtd_device_register(eprom_mtd, NULL, 0);
93 } 93 }
94 94
95 nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0); 95 mtd_device_parse_register(flash_mtd, probes, 0,
96 96 superh_se_partitions, NUM_PARTITIONS);
97#ifdef CONFIG_MTD_SUPERH_RESERVE
98 if (nr_parts <= 0) {
99 printk(KERN_NOTICE "Using configured partition at 0x%08x.\n",
100 CONFIG_MTD_SUPERH_RESERVE);
101 parsed_parts = superh_se_partitions;
102 nr_parts = sizeof(superh_se_partitions)/sizeof(*parsed_parts);
103 }
104#endif /* CONFIG_MTD_SUPERH_RESERVE */
105
106 if (nr_parts > 0)
107 mtd_device_register(flash_mtd, parsed_parts, nr_parts);
108 else
109 mtd_device_register(flash_mtd, NULL, 0);
110 97
111 return 0; 98 return 0;
112} 99}
@@ -118,10 +105,7 @@ static void __exit cleanup_soleng_maps(void)
118 map_destroy(eprom_mtd); 105 map_destroy(eprom_mtd);
119 } 106 }
120 107
121 if (parsed_parts) 108 mtd_device_unregister(flash_mtd);
122 mtd_device_unregister(flash_mtd);
123 else
124 mtd_device_unregister(flash_mtd);
125 map_destroy(flash_mtd); 109 map_destroy(flash_mtd);
126} 110}
127 111
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 901ce968efae..aa7e0cb2893c 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -20,7 +20,6 @@
20#include <asm/immap_cpm2.h> 20#include <asm/immap_cpm2.h>
21 21
22static struct mtd_info *sbcmtd[3]; 22static struct mtd_info *sbcmtd[3];
23static struct mtd_partition *sbcmtd_parts[3];
24 23
25struct map_info sbc82xx_flash_map[3] = { 24struct map_info sbc82xx_flash_map[3] = {
26 {.name = "Boot flash"}, 25 {.name = "Boot flash"},
@@ -101,6 +100,7 @@ static int __init init_sbc82xx_flash(void)
101 for (i=0; i<3; i++) { 100 for (i=0; i<3; i++) {
102 int8_t flashcs[3] = { 0, 6, 1 }; 101 int8_t flashcs[3] = { 0, 6, 1 };
103 int nr_parts; 102 int nr_parts;
103 struct mtd_partition *defparts;
104 104
105 printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d", 105 printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d",
106 sbc82xx_flash_map[i].name, 106 sbc82xx_flash_map[i].name,
@@ -113,7 +113,8 @@ static int __init init_sbc82xx_flash(void)
113 } 113 }
114 printk(" at %08lx)\n", sbc82xx_flash_map[i].phys); 114 printk(" at %08lx)\n", sbc82xx_flash_map[i].phys);
115 115
116 sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys, sbc82xx_flash_map[i].size); 116 sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys,
117 sbc82xx_flash_map[i].size);
117 118
118 if (!sbc82xx_flash_map[i].virt) { 119 if (!sbc82xx_flash_map[i].virt) {
119 printk("Failed to ioremap\n"); 120 printk("Failed to ioremap\n");
@@ -129,24 +130,20 @@ static int __init init_sbc82xx_flash(void)
129 130
130 sbcmtd[i]->owner = THIS_MODULE; 131 sbcmtd[i]->owner = THIS_MODULE;
131 132
132 nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes,
133 &sbcmtd_parts[i], 0);
134 if (nr_parts > 0) {
135 mtd_device_register(sbcmtd[i], sbcmtd_parts[i],
136 nr_parts);
137 continue;
138 }
139
140 /* No partitioning detected. Use default */ 133 /* No partitioning detected. Use default */
141 if (i == 2) { 134 if (i == 2) {
142 mtd_device_register(sbcmtd[i], NULL, 0); 135 defparts = NULL;
136 nr_parts = 0;
143 } else if (i == bigflash) { 137 } else if (i == bigflash) {
144 mtd_device_register(sbcmtd[i], bigflash_parts, 138 defparts = bigflash_parts;
145 ARRAY_SIZE(bigflash_parts)); 139 nr_parts = ARRAY_SIZE(bigflash_parts);
146 } else { 140 } else {
147 mtd_device_register(sbcmtd[i], smallflash_parts, 141 defparts = smallflash_parts;
148 ARRAY_SIZE(smallflash_parts)); 142 nr_parts = ARRAY_SIZE(smallflash_parts);
149 } 143 }
144
145 mtd_device_parse_register(sbcmtd[i], part_probes, 0,
146 defparts, nr_parts);
150 } 147 }
151 return 0; 148 return 0;
152} 149}
@@ -159,12 +156,8 @@ static void __exit cleanup_sbc82xx_flash(void)
159 if (!sbcmtd[i]) 156 if (!sbcmtd[i])
160 continue; 157 continue;
161 158
162 if (i<2 || sbcmtd_parts[i]) 159 mtd_device_unregister(sbcmtd[i]);
163 mtd_device_unregister(sbcmtd[i]);
164 else
165 mtd_device_unregister(sbcmtd[i]);
166 160
167 kfree(sbcmtd_parts[i]);
168 map_destroy(sbcmtd[i]); 161 map_destroy(sbcmtd[i]);
169 162
170 iounmap((void *)sbc82xx_flash_map[i].virt); 163 iounmap((void *)sbc82xx_flash_map[i].virt);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ca385697446e..ed8b5e744b12 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -426,6 +426,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
426 new->rq->queuedata = new; 426 new->rq->queuedata = new;
427 blk_queue_logical_block_size(new->rq, tr->blksize); 427 blk_queue_logical_block_size(new->rq, tr->blksize);
428 428
429 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
430
429 if (tr->discard) { 431 if (tr->discard) {
430 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq); 432 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
431 new->rq->limits.max_discard_sectors = UINT_MAX; 433 new->rq->limits.max_discard_sectors = UINT_MAX;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 3326615ad66b..7c1dc908a174 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -44,7 +44,7 @@ struct mtdblk_dev {
44 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; 44 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
45}; 45};
46 46
47static struct mutex mtdblks_lock; 47static DEFINE_MUTEX(mtdblks_lock);
48 48
49/* 49/*
50 * Cache stuff... 50 * Cache stuff...
@@ -119,7 +119,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
119 if (mtdblk->cache_state != STATE_DIRTY) 119 if (mtdblk->cache_state != STATE_DIRTY)
120 return 0; 120 return 0;
121 121
122 DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" " 122 pr_debug("mtdblock: writing cached data for \"%s\" "
123 "at 0x%lx, size 0x%x\n", mtd->name, 123 "at 0x%lx, size 0x%x\n", mtd->name,
124 mtdblk->cache_offset, mtdblk->cache_size); 124 mtdblk->cache_offset, mtdblk->cache_size);
125 125
@@ -148,7 +148,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
148 size_t retlen; 148 size_t retlen;
149 int ret; 149 int ret;
150 150
151 DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n", 151 pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
152 mtd->name, pos, len); 152 mtd->name, pos, len);
153 153
154 if (!sect_size) 154 if (!sect_size)
@@ -218,7 +218,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
218 size_t retlen; 218 size_t retlen;
219 int ret; 219 int ret;
220 220
221 DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n", 221 pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
222 mtd->name, pos, len); 222 mtd->name, pos, len);
223 223
224 if (!sect_size) 224 if (!sect_size)
@@ -283,7 +283,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
283{ 283{
284 struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); 284 struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
285 285
286 DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); 286 pr_debug("mtdblock_open\n");
287 287
288 mutex_lock(&mtdblks_lock); 288 mutex_lock(&mtdblks_lock);
289 if (mtdblk->count) { 289 if (mtdblk->count) {
@@ -303,7 +303,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
303 303
304 mutex_unlock(&mtdblks_lock); 304 mutex_unlock(&mtdblks_lock);
305 305
306 DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); 306 pr_debug("ok\n");
307 307
308 return 0; 308 return 0;
309} 309}
@@ -312,7 +312,7 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
312{ 312{
313 struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); 313 struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
314 314
315 DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); 315 pr_debug("mtdblock_release\n");
316 316
317 mutex_lock(&mtdblks_lock); 317 mutex_lock(&mtdblks_lock);
318 318
@@ -329,7 +329,7 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
329 329
330 mutex_unlock(&mtdblks_lock); 330 mutex_unlock(&mtdblks_lock);
331 331
332 DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); 332 pr_debug("ok\n");
333 333
334 return 0; 334 return 0;
335} 335}
@@ -389,8 +389,6 @@ static struct mtd_blktrans_ops mtdblock_tr = {
389 389
390static int __init init_mtdblock(void) 390static int __init init_mtdblock(void)
391{ 391{
392 mutex_init(&mtdblks_lock);
393
394 return register_mtd_blktrans(&mtdblock_tr); 392 return register_mtd_blktrans(&mtdblock_tr);
395} 393}
396 394
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 61086ea3cc6b..e7dc732ddabc 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -43,7 +43,7 @@ static struct vfsmount *mtd_inode_mnt __read_mostly;
43 43
44/* 44/*
45 * Data structure to hold the pointer to the mtd device as well 45 * Data structure to hold the pointer to the mtd device as well
46 * as mode information ofr various use cases. 46 * as mode information of various use cases.
47 */ 47 */
48struct mtd_file_info { 48struct mtd_file_info {
49 struct mtd_info *mtd; 49 struct mtd_info *mtd;
@@ -86,7 +86,7 @@ static int mtd_open(struct inode *inode, struct file *file)
86 struct mtd_file_info *mfi; 86 struct mtd_file_info *mfi;
87 struct inode *mtd_ino; 87 struct inode *mtd_ino;
88 88
89 DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n"); 89 pr_debug("MTD_open\n");
90 90
91 /* You can't open the RO devices RW */ 91 /* You can't open the RO devices RW */
92 if ((file->f_mode & FMODE_WRITE) && (minor & 1)) 92 if ((file->f_mode & FMODE_WRITE) && (minor & 1))
@@ -151,7 +151,7 @@ static int mtd_close(struct inode *inode, struct file *file)
151 struct mtd_file_info *mfi = file->private_data; 151 struct mtd_file_info *mfi = file->private_data;
152 struct mtd_info *mtd = mfi->mtd; 152 struct mtd_info *mtd = mfi->mtd;
153 153
154 DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n"); 154 pr_debug("MTD_close\n");
155 155
156 /* Only sync if opened RW */ 156 /* Only sync if opened RW */
157 if ((file->f_mode & FMODE_WRITE) && mtd->sync) 157 if ((file->f_mode & FMODE_WRITE) && mtd->sync)
@@ -195,7 +195,7 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
195 size_t size = count; 195 size_t size = count;
196 char *kbuf; 196 char *kbuf;
197 197
198 DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n"); 198 pr_debug("MTD_read\n");
199 199
200 if (*ppos + count > mtd->size) 200 if (*ppos + count > mtd->size)
201 count = mtd->size - *ppos; 201 count = mtd->size - *ppos;
@@ -211,17 +211,17 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
211 len = min_t(size_t, count, size); 211 len = min_t(size_t, count, size);
212 212
213 switch (mfi->mode) { 213 switch (mfi->mode) {
214 case MTD_MODE_OTP_FACTORY: 214 case MTD_FILE_MODE_OTP_FACTORY:
215 ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf); 215 ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
216 break; 216 break;
217 case MTD_MODE_OTP_USER: 217 case MTD_FILE_MODE_OTP_USER:
218 ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); 218 ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
219 break; 219 break;
220 case MTD_MODE_RAW: 220 case MTD_FILE_MODE_RAW:
221 { 221 {
222 struct mtd_oob_ops ops; 222 struct mtd_oob_ops ops;
223 223
224 ops.mode = MTD_OOB_RAW; 224 ops.mode = MTD_OPS_RAW;
225 ops.datbuf = kbuf; 225 ops.datbuf = kbuf;
226 ops.oobbuf = NULL; 226 ops.oobbuf = NULL;
227 ops.len = len; 227 ops.len = len;
@@ -233,16 +233,16 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
233 default: 233 default:
234 ret = mtd->read(mtd, *ppos, len, &retlen, kbuf); 234 ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
235 } 235 }
236 /* Nand returns -EBADMSG on ecc errors, but it returns 236 /* Nand returns -EBADMSG on ECC errors, but it returns
237 * the data. For our userspace tools it is important 237 * the data. For our userspace tools it is important
238 * to dump areas with ecc errors ! 238 * to dump areas with ECC errors!
239 * For kernel internal usage it also might return -EUCLEAN 239 * For kernel internal usage it also might return -EUCLEAN
240 * to signal the caller that a bitflip has occurred and has 240 * to signal the caller that a bitflip has occurred and has
241 * been corrected by the ECC algorithm. 241 * been corrected by the ECC algorithm.
242 * Userspace software which accesses NAND this way 242 * Userspace software which accesses NAND this way
243 * must be aware of the fact that it deals with NAND 243 * must be aware of the fact that it deals with NAND
244 */ 244 */
245 if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) { 245 if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
246 *ppos += retlen; 246 *ppos += retlen;
247 if (copy_to_user(buf, kbuf, retlen)) { 247 if (copy_to_user(buf, kbuf, retlen)) {
248 kfree(kbuf); 248 kfree(kbuf);
@@ -278,7 +278,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
278 int ret=0; 278 int ret=0;
279 int len; 279 int len;
280 280
281 DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n"); 281 pr_debug("MTD_write\n");
282 282
283 if (*ppos == mtd->size) 283 if (*ppos == mtd->size)
284 return -ENOSPC; 284 return -ENOSPC;
@@ -302,10 +302,10 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
302 } 302 }
303 303
304 switch (mfi->mode) { 304 switch (mfi->mode) {
305 case MTD_MODE_OTP_FACTORY: 305 case MTD_FILE_MODE_OTP_FACTORY:
306 ret = -EROFS; 306 ret = -EROFS;
307 break; 307 break;
308 case MTD_MODE_OTP_USER: 308 case MTD_FILE_MODE_OTP_USER:
309 if (!mtd->write_user_prot_reg) { 309 if (!mtd->write_user_prot_reg) {
310 ret = -EOPNOTSUPP; 310 ret = -EOPNOTSUPP;
311 break; 311 break;
@@ -313,13 +313,14 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
313 ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); 313 ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
314 break; 314 break;
315 315
316 case MTD_MODE_RAW: 316 case MTD_FILE_MODE_RAW:
317 { 317 {
318 struct mtd_oob_ops ops; 318 struct mtd_oob_ops ops;
319 319
320 ops.mode = MTD_OOB_RAW; 320 ops.mode = MTD_OPS_RAW;
321 ops.datbuf = kbuf; 321 ops.datbuf = kbuf;
322 ops.oobbuf = NULL; 322 ops.oobbuf = NULL;
323 ops.ooboffs = 0;
323 ops.len = len; 324 ops.len = len;
324 325
325 ret = mtd->write_oob(mtd, *ppos, &ops); 326 ret = mtd->write_oob(mtd, *ppos, &ops);
@@ -367,13 +368,13 @@ static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
367 if (!mtd->read_fact_prot_reg) 368 if (!mtd->read_fact_prot_reg)
368 ret = -EOPNOTSUPP; 369 ret = -EOPNOTSUPP;
369 else 370 else
370 mfi->mode = MTD_MODE_OTP_FACTORY; 371 mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
371 break; 372 break;
372 case MTD_OTP_USER: 373 case MTD_OTP_USER:
373 if (!mtd->read_fact_prot_reg) 374 if (!mtd->read_fact_prot_reg)
374 ret = -EOPNOTSUPP; 375 ret = -EOPNOTSUPP;
375 else 376 else
376 mfi->mode = MTD_MODE_OTP_USER; 377 mfi->mode = MTD_FILE_MODE_OTP_USER;
377 break; 378 break;
378 default: 379 default:
379 ret = -EINVAL; 380 ret = -EINVAL;
@@ -390,6 +391,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
390 uint64_t start, uint32_t length, void __user *ptr, 391 uint64_t start, uint32_t length, void __user *ptr,
391 uint32_t __user *retp) 392 uint32_t __user *retp)
392{ 393{
394 struct mtd_file_info *mfi = file->private_data;
393 struct mtd_oob_ops ops; 395 struct mtd_oob_ops ops;
394 uint32_t retlen; 396 uint32_t retlen;
395 int ret = 0; 397 int ret = 0;
@@ -409,9 +411,10 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
409 return ret; 411 return ret;
410 412
411 ops.ooblen = length; 413 ops.ooblen = length;
412 ops.ooboffs = start & (mtd->oobsize - 1); 414 ops.ooboffs = start & (mtd->writesize - 1);
413 ops.datbuf = NULL; 415 ops.datbuf = NULL;
414 ops.mode = MTD_OOB_PLACE; 416 ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
417 MTD_OPS_PLACE_OOB;
415 418
416 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) 419 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
417 return -EINVAL; 420 return -EINVAL;
@@ -420,7 +423,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
420 if (IS_ERR(ops.oobbuf)) 423 if (IS_ERR(ops.oobbuf))
421 return PTR_ERR(ops.oobbuf); 424 return PTR_ERR(ops.oobbuf);
422 425
423 start &= ~((uint64_t)mtd->oobsize - 1); 426 start &= ~((uint64_t)mtd->writesize - 1);
424 ret = mtd->write_oob(mtd, start, &ops); 427 ret = mtd->write_oob(mtd, start, &ops);
425 428
426 if (ops.oobretlen > 0xFFFFFFFFU) 429 if (ops.oobretlen > 0xFFFFFFFFU)
@@ -433,9 +436,11 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
433 return ret; 436 return ret;
434} 437}
435 438
436static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start, 439static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
437 uint32_t length, void __user *ptr, uint32_t __user *retp) 440 uint64_t start, uint32_t length, void __user *ptr,
441 uint32_t __user *retp)
438{ 442{
443 struct mtd_file_info *mfi = file->private_data;
439 struct mtd_oob_ops ops; 444 struct mtd_oob_ops ops;
440 int ret = 0; 445 int ret = 0;
441 446
@@ -451,9 +456,10 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
451 return ret; 456 return ret;
452 457
453 ops.ooblen = length; 458 ops.ooblen = length;
454 ops.ooboffs = start & (mtd->oobsize - 1); 459 ops.ooboffs = start & (mtd->writesize - 1);
455 ops.datbuf = NULL; 460 ops.datbuf = NULL;
456 ops.mode = MTD_OOB_PLACE; 461 ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
462 MTD_OPS_PLACE_OOB;
457 463
458 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) 464 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
459 return -EINVAL; 465 return -EINVAL;
@@ -462,7 +468,7 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
462 if (!ops.oobbuf) 468 if (!ops.oobbuf)
463 return -ENOMEM; 469 return -ENOMEM;
464 470
465 start &= ~((uint64_t)mtd->oobsize - 1); 471 start &= ~((uint64_t)mtd->writesize - 1);
466 ret = mtd->read_oob(mtd, start, &ops); 472 ret = mtd->read_oob(mtd, start, &ops);
467 473
468 if (put_user(ops.oobretlen, retp)) 474 if (put_user(ops.oobretlen, retp))
@@ -472,13 +478,29 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
472 ret = -EFAULT; 478 ret = -EFAULT;
473 479
474 kfree(ops.oobbuf); 480 kfree(ops.oobbuf);
481
482 /*
483 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
484 * data. For our userspace tools it is important to dump areas
485 * with ECC errors!
486 * For kernel internal usage it also might return -EUCLEAN
487 * to signal the caller that a bitflip has occured and has
488 * been corrected by the ECC algorithm.
489 *
490 * Note: currently the standard NAND function, nand_read_oob_std,
491 * does not calculate ECC for the OOB area, so do not rely on
492 * this behavior unless you have replaced it with your own.
493 */
494 if (mtd_is_bitflip_or_eccerr(ret))
495 return 0;
496
475 return ret; 497 return ret;
476} 498}
477 499
478/* 500/*
479 * Copies (and truncates, if necessary) data from the larger struct, 501 * Copies (and truncates, if necessary) data from the larger struct,
480 * nand_ecclayout, to the smaller, deprecated layout struct, 502 * nand_ecclayout, to the smaller, deprecated layout struct,
481 * nand_ecclayout_user. This is necessary only to suppport the deprecated 503 * nand_ecclayout_user. This is necessary only to support the deprecated
482 * API ioctl ECCGETLAYOUT while allowing all new functionality to use 504 * API ioctl ECCGETLAYOUT while allowing all new functionality to use
483 * nand_ecclayout flexibly (i.e. the struct may change size in new 505 * nand_ecclayout flexibly (i.e. the struct may change size in new
484 * releases without requiring major rewrites). 506 * releases without requiring major rewrites).
@@ -544,6 +566,55 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
544 } 566 }
545} 567}
546 568
569static int mtd_write_ioctl(struct mtd_info *mtd,
570 struct mtd_write_req __user *argp)
571{
572 struct mtd_write_req req;
573 struct mtd_oob_ops ops;
574 void __user *usr_data, *usr_oob;
575 int ret;
576
577 if (copy_from_user(&req, argp, sizeof(req)) ||
578 !access_ok(VERIFY_READ, req.usr_data, req.len) ||
579 !access_ok(VERIFY_READ, req.usr_oob, req.ooblen))
580 return -EFAULT;
581 if (!mtd->write_oob)
582 return -EOPNOTSUPP;
583
584 ops.mode = req.mode;
585 ops.len = (size_t)req.len;
586 ops.ooblen = (size_t)req.ooblen;
587 ops.ooboffs = 0;
588
589 usr_data = (void __user *)(uintptr_t)req.usr_data;
590 usr_oob = (void __user *)(uintptr_t)req.usr_oob;
591
592 if (req.usr_data) {
593 ops.datbuf = memdup_user(usr_data, ops.len);
594 if (IS_ERR(ops.datbuf))
595 return PTR_ERR(ops.datbuf);
596 } else {
597 ops.datbuf = NULL;
598 }
599
600 if (req.usr_oob) {
601 ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
602 if (IS_ERR(ops.oobbuf)) {
603 kfree(ops.datbuf);
604 return PTR_ERR(ops.oobbuf);
605 }
606 } else {
607 ops.oobbuf = NULL;
608 }
609
610 ret = mtd->write_oob(mtd, (loff_t)req.start, &ops);
611
612 kfree(ops.datbuf);
613 kfree(ops.oobbuf);
614
615 return ret;
616}
617
547static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) 618static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
548{ 619{
549 struct mtd_file_info *mfi = file->private_data; 620 struct mtd_file_info *mfi = file->private_data;
@@ -553,7 +624,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
553 u_long size; 624 u_long size;
554 struct mtd_info_user info; 625 struct mtd_info_user info;
555 626
556 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n"); 627 pr_debug("MTD_ioctl\n");
557 628
558 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; 629 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
559 if (cmd & IOC_IN) { 630 if (cmd & IOC_IN) {
@@ -601,8 +672,8 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
601 info.erasesize = mtd->erasesize; 672 info.erasesize = mtd->erasesize;
602 info.writesize = mtd->writesize; 673 info.writesize = mtd->writesize;
603 info.oobsize = mtd->oobsize; 674 info.oobsize = mtd->oobsize;
604 /* The below fields are obsolete */ 675 /* The below field is obsolete */
605 info.ecctype = -1; 676 info.padding = 0;
606 if (copy_to_user(argp, &info, sizeof(struct mtd_info_user))) 677 if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
607 return -EFAULT; 678 return -EFAULT;
608 break; 679 break;
@@ -698,7 +769,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
698 if (copy_from_user(&buf, argp, sizeof(buf))) 769 if (copy_from_user(&buf, argp, sizeof(buf)))
699 ret = -EFAULT; 770 ret = -EFAULT;
700 else 771 else
701 ret = mtd_do_readoob(mtd, buf.start, buf.length, 772 ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
702 buf.ptr, &buf_user->start); 773 buf.ptr, &buf_user->start);
703 break; 774 break;
704 } 775 }
@@ -725,12 +796,19 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
725 if (copy_from_user(&buf, argp, sizeof(buf))) 796 if (copy_from_user(&buf, argp, sizeof(buf)))
726 ret = -EFAULT; 797 ret = -EFAULT;
727 else 798 else
728 ret = mtd_do_readoob(mtd, buf.start, buf.length, 799 ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
729 (void __user *)(uintptr_t)buf.usr_ptr, 800 (void __user *)(uintptr_t)buf.usr_ptr,
730 &buf_user->length); 801 &buf_user->length);
731 break; 802 break;
732 } 803 }
733 804
805 case MEMWRITE:
806 {
807 ret = mtd_write_ioctl(mtd,
808 (struct mtd_write_req __user *)arg);
809 break;
810 }
811
734 case MEMLOCK: 812 case MEMLOCK:
735 { 813 {
736 struct erase_info_user einfo; 814 struct erase_info_user einfo;
@@ -827,7 +905,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
827 if (copy_from_user(&mode, argp, sizeof(int))) 905 if (copy_from_user(&mode, argp, sizeof(int)))
828 return -EFAULT; 906 return -EFAULT;
829 907
830 mfi->mode = MTD_MODE_NORMAL; 908 mfi->mode = MTD_FILE_MODE_NORMAL;
831 909
832 ret = otp_select_filemode(mfi, mode); 910 ret = otp_select_filemode(mfi, mode);
833 911
@@ -843,11 +921,11 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
843 return -ENOMEM; 921 return -ENOMEM;
844 ret = -EOPNOTSUPP; 922 ret = -EOPNOTSUPP;
845 switch (mfi->mode) { 923 switch (mfi->mode) {
846 case MTD_MODE_OTP_FACTORY: 924 case MTD_FILE_MODE_OTP_FACTORY:
847 if (mtd->get_fact_prot_info) 925 if (mtd->get_fact_prot_info)
848 ret = mtd->get_fact_prot_info(mtd, buf, 4096); 926 ret = mtd->get_fact_prot_info(mtd, buf, 4096);
849 break; 927 break;
850 case MTD_MODE_OTP_USER: 928 case MTD_FILE_MODE_OTP_USER:
851 if (mtd->get_user_prot_info) 929 if (mtd->get_user_prot_info)
852 ret = mtd->get_user_prot_info(mtd, buf, 4096); 930 ret = mtd->get_user_prot_info(mtd, buf, 4096);
853 break; 931 break;
@@ -871,7 +949,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
871 { 949 {
872 struct otp_info oinfo; 950 struct otp_info oinfo;
873 951
874 if (mfi->mode != MTD_MODE_OTP_USER) 952 if (mfi->mode != MTD_FILE_MODE_OTP_USER)
875 return -EINVAL; 953 return -EINVAL;
876 if (copy_from_user(&oinfo, argp, sizeof(oinfo))) 954 if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
877 return -EFAULT; 955 return -EFAULT;
@@ -882,7 +960,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
882 } 960 }
883#endif 961#endif
884 962
885 /* This ioctl is being deprecated - it truncates the ecc layout */ 963 /* This ioctl is being deprecated - it truncates the ECC layout */
886 case ECCGETLAYOUT: 964 case ECCGETLAYOUT:
887 { 965 {
888 struct nand_ecclayout_user *usrlay; 966 struct nand_ecclayout_user *usrlay;
@@ -915,17 +993,17 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
915 mfi->mode = 0; 993 mfi->mode = 0;
916 994
917 switch(arg) { 995 switch(arg) {
918 case MTD_MODE_OTP_FACTORY: 996 case MTD_FILE_MODE_OTP_FACTORY:
919 case MTD_MODE_OTP_USER: 997 case MTD_FILE_MODE_OTP_USER:
920 ret = otp_select_filemode(mfi, arg); 998 ret = otp_select_filemode(mfi, arg);
921 break; 999 break;
922 1000
923 case MTD_MODE_RAW: 1001 case MTD_FILE_MODE_RAW:
924 if (!mtd->read_oob || !mtd->write_oob) 1002 if (!mtd->read_oob || !mtd->write_oob)
925 return -EOPNOTSUPP; 1003 return -EOPNOTSUPP;
926 mfi->mode = arg; 1004 mfi->mode = arg;
927 1005
928 case MTD_MODE_NORMAL: 1006 case MTD_FILE_MODE_NORMAL:
929 break; 1007 break;
930 default: 1008 default:
931 ret = -EINVAL; 1009 ret = -EINVAL;
@@ -1011,7 +1089,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
1011 if (copy_from_user(&buf, argp, sizeof(buf))) 1089 if (copy_from_user(&buf, argp, sizeof(buf)))
1012 ret = -EFAULT; 1090 ret = -EFAULT;
1013 else 1091 else
1014 ret = mtd_do_readoob(mtd, buf.start, 1092 ret = mtd_do_readoob(file, mtd, buf.start,
1015 buf.length, compat_ptr(buf.ptr), 1093 buf.length, compat_ptr(buf.ptr),
1016 &buf_user->start); 1094 &buf_user->start);
1017 break; 1095 break;
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index e601672a5305..6df4d4d4eb92 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -95,10 +95,10 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
95 95
96 /* Save information about bitflips! */ 96 /* Save information about bitflips! */
97 if (unlikely(err)) { 97 if (unlikely(err)) {
98 if (err == -EBADMSG) { 98 if (mtd_is_eccerr(err)) {
99 mtd->ecc_stats.failed++; 99 mtd->ecc_stats.failed++;
100 ret = err; 100 ret = err;
101 } else if (err == -EUCLEAN) { 101 } else if (mtd_is_bitflip(err)) {
102 mtd->ecc_stats.corrected++; 102 mtd->ecc_stats.corrected++;
103 /* Do not overwrite -EBADMSG !! */ 103 /* Do not overwrite -EBADMSG !! */
104 if (!ret) 104 if (!ret)
@@ -279,10 +279,10 @@ concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
279 279
280 /* Save information about bitflips! */ 280 /* Save information about bitflips! */
281 if (unlikely(err)) { 281 if (unlikely(err)) {
282 if (err == -EBADMSG) { 282 if (mtd_is_eccerr(err)) {
283 mtd->ecc_stats.failed++; 283 mtd->ecc_stats.failed++;
284 ret = err; 284 ret = err;
285 } else if (err == -EUCLEAN) { 285 } else if (mtd_is_bitflip(err)) {
286 mtd->ecc_stats.corrected++; 286 mtd->ecc_stats.corrected++;
287 /* Do not overwrite -EBADMSG !! */ 287 /* Do not overwrite -EBADMSG !! */
288 if (!ret) 288 if (!ret)
@@ -770,7 +770,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
770 770
771 /* 771 /*
772 * Set up the new "super" device's MTD object structure, check for 772 * Set up the new "super" device's MTD object structure, check for
773 * incompatibilites between the subdevices. 773 * incompatibilities between the subdevices.
774 */ 774 */
775 concat->mtd.type = subdev[0]->type; 775 concat->mtd.type = subdev[0]->type;
776 concat->mtd.flags = subdev[0]->flags; 776 concat->mtd.flags = subdev[0]->flags;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index c510aff289a8..b01993ea260e 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -362,7 +362,7 @@ int add_mtd_device(struct mtd_info *mtd)
362 MTD_DEVT(i) + 1, 362 MTD_DEVT(i) + 1,
363 NULL, "mtd%dro", i); 363 NULL, "mtd%dro", i);
364 364
365 DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name); 365 pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
366 /* No need to get a refcount on the module containing 366 /* No need to get a refcount on the module containing
367 the notifier, since we hold the mtd_table_mutex */ 367 the notifier, since we hold the mtd_table_mutex */
368 list_for_each_entry(not, &mtd_notifiers, list) 368 list_for_each_entry(not, &mtd_notifiers, list)
@@ -429,27 +429,63 @@ out_error:
429} 429}
430 430
431/** 431/**
432 * mtd_device_register - register an MTD device. 432 * mtd_device_parse_register - parse partitions and register an MTD device.
433 * 433 *
434 * @master: the MTD device to register 434 * @mtd: the MTD device to register
435 * @parts: the partitions to register - only valid if nr_parts > 0 435 * @types: the list of MTD partition probes to try, see
436 * @nr_parts: the number of partitions in parts. If zero then the full MTD 436 * 'parse_mtd_partitions()' for more information
437 * device is registered 437 * @parser_data: MTD partition parser-specific data
438 * @parts: fallback partition information to register, if parsing fails;
439 * only valid if %nr_parts > %0
440 * @nr_parts: the number of partitions in parts, if zero then the full
441 * MTD device is registered if no partition info is found
438 * 442 *
439 * Register an MTD device with the system and optionally, a number of 443 * This function aggregates MTD partitions parsing (done by
440 * partitions. If nr_parts is 0 then the whole device is registered, otherwise 444 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
441 * only the partitions are registered. To register both the full device *and* 445 * basically follows the most common pattern found in many MTD drivers:
442 * the partitions, call mtd_device_register() twice, once with nr_parts == 0 446 *
443 * and once equal to the number of partitions. 447 * * It first tries to probe partitions on MTD device @mtd using parsers
448 * specified in @types (if @types is %NULL, then the default list of parsers
449 * is used, see 'parse_mtd_partitions()' for more information). If none are
450 * found this functions tries to fallback to information specified in
451 * @parts/@nr_parts.
452 * * If any partitioning info was found, this function registers the found
453 * partitions.
454 * * If no partitions were found this function just registers the MTD device
455 * @mtd and exits.
456 *
457 * Returns zero in case of success and a negative error code in case of failure.
444 */ 458 */
445int mtd_device_register(struct mtd_info *master, 459int mtd_device_parse_register(struct mtd_info *mtd, const char **types,
446 const struct mtd_partition *parts, 460 struct mtd_part_parser_data *parser_data,
447 int nr_parts) 461 const struct mtd_partition *parts,
462 int nr_parts)
448{ 463{
449 return parts ? add_mtd_partitions(master, parts, nr_parts) : 464 int err;
450 add_mtd_device(master); 465 struct mtd_partition *real_parts;
466
467 err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
468 if (err <= 0 && nr_parts && parts) {
469 real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
470 GFP_KERNEL);
471 if (!real_parts)
472 err = -ENOMEM;
473 else
474 err = nr_parts;
475 }
476
477 if (err > 0) {
478 err = add_mtd_partitions(mtd, real_parts, err);
479 kfree(real_parts);
480 } else if (err == 0) {
481 err = add_mtd_device(mtd);
482 if (err == 1)
483 err = -ENODEV;
484 }
485
486 return err;
451} 487}
452EXPORT_SYMBOL_GPL(mtd_device_register); 488EXPORT_SYMBOL_GPL(mtd_device_parse_register);
453 489
454/** 490/**
455 * mtd_device_unregister - unregister an existing MTD device. 491 * mtd_device_unregister - unregister an existing MTD device.
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 0ed6126b4c1f..961a38408542 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -15,6 +15,9 @@ extern int del_mtd_device(struct mtd_info *mtd);
15extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, 15extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *,
16 int); 16 int);
17extern int del_mtd_partitions(struct mtd_info *); 17extern int del_mtd_partitions(struct mtd_info *);
18extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
19 struct mtd_partition **pparts,
20 struct mtd_part_parser_data *data);
18 21
19#define mtd_for_each_device(mtd) \ 22#define mtd_for_each_device(mtd) \
20 for ((mtd) = __mtd_next_device(0); \ 23 for ((mtd) = __mtd_next_device(0); \
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index e3e40f440323..1e2fa6236705 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -258,7 +258,7 @@ static void find_next_position(struct mtdoops_context *cxt)
258 ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE, 258 ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
259 &retlen, (u_char *) &count[0]); 259 &retlen, (u_char *) &count[0]);
260 if (retlen != MTDOOPS_HEADER_SIZE || 260 if (retlen != MTDOOPS_HEADER_SIZE ||
261 (ret < 0 && ret != -EUCLEAN)) { 261 (ret < 0 && !mtd_is_bitflip(ret))) {
262 printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n", 262 printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
263 page * record_size, retlen, 263 page * record_size, retlen,
264 MTDOOPS_HEADER_SIZE, ret); 264 MTDOOPS_HEADER_SIZE, ret);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 630be3e7da04..a0bd2de4752b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -73,9 +73,9 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
73 res = part->master->read(part->master, from + part->offset, 73 res = part->master->read(part->master, from + part->offset,
74 len, retlen, buf); 74 len, retlen, buf);
75 if (unlikely(res)) { 75 if (unlikely(res)) {
76 if (res == -EUCLEAN) 76 if (mtd_is_bitflip(res))
77 mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; 77 mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
78 if (res == -EBADMSG) 78 if (mtd_is_eccerr(res))
79 mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed; 79 mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
80 } 80 }
81 return res; 81 return res;
@@ -130,7 +130,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
130 if (ops->oobbuf) { 130 if (ops->oobbuf) {
131 size_t len, pages; 131 size_t len, pages;
132 132
133 if (ops->mode == MTD_OOB_AUTO) 133 if (ops->mode == MTD_OPS_AUTO_OOB)
134 len = mtd->oobavail; 134 len = mtd->oobavail;
135 else 135 else
136 len = mtd->oobsize; 136 len = mtd->oobsize;
@@ -142,9 +142,9 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
142 142
143 res = part->master->read_oob(part->master, from + part->offset, ops); 143 res = part->master->read_oob(part->master, from + part->offset, ops);
144 if (unlikely(res)) { 144 if (unlikely(res)) {
145 if (res == -EUCLEAN) 145 if (mtd_is_bitflip(res))
146 mtd->ecc_stats.corrected++; 146 mtd->ecc_stats.corrected++;
147 if (res == -EBADMSG) 147 if (mtd_is_eccerr(res))
148 mtd->ecc_stats.failed++; 148 mtd->ecc_stats.failed++;
149 } 149 }
150 return res; 150 return res;
@@ -479,6 +479,19 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
479 (unsigned long long)cur_offset, (unsigned long long)slave->offset); 479 (unsigned long long)cur_offset, (unsigned long long)slave->offset);
480 } 480 }
481 } 481 }
482 if (slave->offset == MTDPART_OFS_RETAIN) {
483 slave->offset = cur_offset;
484 if (master->size - slave->offset >= slave->mtd.size) {
485 slave->mtd.size = master->size - slave->offset
486 - slave->mtd.size;
487 } else {
488 printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
489 part->name, master->size - slave->offset,
490 slave->mtd.size);
491 /* register to preserve ordering */
492 goto out_register;
493 }
494 }
482 if (slave->mtd.size == MTDPART_SIZ_FULL) 495 if (slave->mtd.size == MTDPART_SIZ_FULL)
483 slave->mtd.size = master->size - slave->offset; 496 slave->mtd.size = master->size - slave->offset;
484 497
@@ -693,6 +706,8 @@ static struct mtd_part_parser *get_partition_parser(const char *name)
693 return ret; 706 return ret;
694} 707}
695 708
709#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
710
696int register_mtd_parser(struct mtd_part_parser *p) 711int register_mtd_parser(struct mtd_part_parser *p)
697{ 712{
698 spin_lock(&part_parser_lock); 713 spin_lock(&part_parser_lock);
@@ -712,19 +727,51 @@ int deregister_mtd_parser(struct mtd_part_parser *p)
712} 727}
713EXPORT_SYMBOL_GPL(deregister_mtd_parser); 728EXPORT_SYMBOL_GPL(deregister_mtd_parser);
714 729
730/*
731 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
732 * are changing this array!
733 */
734static const char *default_mtd_part_types[] = {
735 "cmdlinepart",
736 "ofpart",
737 NULL
738};
739
740/**
741 * parse_mtd_partitions - parse MTD partitions
742 * @master: the master partition (describes whole MTD device)
743 * @types: names of partition parsers to try or %NULL
744 * @pparts: array of partitions found is returned here
745 * @data: MTD partition parser-specific data
746 *
747 * This function tries to find partition on MTD device @master. It uses MTD
748 * partition parsers, specified in @types. However, if @types is %NULL, then
749 * the default list of parsers is used. The default list contains only the
750 * "cmdlinepart" and "ofpart" parsers ATM.
751 *
752 * This function may return:
753 * o a negative error code in case of failure
754 * o zero if no partitions were found
755 * o a positive number of found partitions, in which case on exit @pparts will
756 * point to an array containing this number of &struct mtd_info objects.
757 */
715int parse_mtd_partitions(struct mtd_info *master, const char **types, 758int parse_mtd_partitions(struct mtd_info *master, const char **types,
716 struct mtd_partition **pparts, unsigned long origin) 759 struct mtd_partition **pparts,
760 struct mtd_part_parser_data *data)
717{ 761{
718 struct mtd_part_parser *parser; 762 struct mtd_part_parser *parser;
719 int ret = 0; 763 int ret = 0;
720 764
765 if (!types)
766 types = default_mtd_part_types;
767
721 for ( ; ret <= 0 && *types; types++) { 768 for ( ; ret <= 0 && *types; types++) {
722 parser = get_partition_parser(*types); 769 parser = get_partition_parser(*types);
723 if (!parser && !request_module("%s", *types)) 770 if (!parser && !request_module("%s", *types))
724 parser = get_partition_parser(*types); 771 parser = get_partition_parser(*types);
725 if (!parser) 772 if (!parser)
726 continue; 773 continue;
727 ret = (*parser->parse_fn)(master, pparts, origin); 774 ret = (*parser->parse_fn)(master, pparts, data);
728 if (ret > 0) { 775 if (ret > 0) {
729 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n", 776 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
730 ret, parser->name, master->name); 777 ret, parser->name, master->name);
@@ -733,7 +780,6 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
733 } 780 }
734 return ret; 781 return ret;
735} 782}
736EXPORT_SYMBOL_GPL(parse_mtd_partitions);
737 783
738int mtd_is_partition(struct mtd_info *mtd) 784int mtd_is_partition(struct mtd_info *mtd)
739{ 785{
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 89f8e66448ab..a90bfe79916d 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -27,12 +27,12 @@ static int get_sb_mtd_compare(struct super_block *sb, void *_mtd)
27 struct mtd_info *mtd = _mtd; 27 struct mtd_info *mtd = _mtd;
28 28
29 if (sb->s_mtd == mtd) { 29 if (sb->s_mtd == mtd) {
30 DEBUG(2, "MTDSB: Match on device %d (\"%s\")\n", 30 pr_debug("MTDSB: Match on device %d (\"%s\")\n",
31 mtd->index, mtd->name); 31 mtd->index, mtd->name);
32 return 1; 32 return 1;
33 } 33 }
34 34
35 DEBUG(2, "MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n", 35 pr_debug("MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n",
36 sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name); 36 sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name);
37 return 0; 37 return 0;
38} 38}
@@ -71,7 +71,7 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
71 goto already_mounted; 71 goto already_mounted;
72 72
73 /* fresh new superblock */ 73 /* fresh new superblock */
74 DEBUG(1, "MTDSB: New superblock for device %d (\"%s\")\n", 74 pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
75 mtd->index, mtd->name); 75 mtd->index, mtd->name);
76 76
77 sb->s_flags = flags; 77 sb->s_flags = flags;
@@ -88,7 +88,7 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
88 88
89 /* new mountpoint for an already mounted superblock */ 89 /* new mountpoint for an already mounted superblock */
90already_mounted: 90already_mounted:
91 DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n", 91 pr_debug("MTDSB: Device %d (\"%s\") is already mounted\n",
92 mtd->index, mtd->name); 92 mtd->index, mtd->name);
93 put_mtd_device(mtd); 93 put_mtd_device(mtd);
94 return dget(sb->s_root); 94 return dget(sb->s_root);
@@ -109,7 +109,7 @@ static struct dentry *mount_mtd_nr(struct file_system_type *fs_type, int flags,
109 109
110 mtd = get_mtd_device(NULL, mtdnr); 110 mtd = get_mtd_device(NULL, mtdnr);
111 if (IS_ERR(mtd)) { 111 if (IS_ERR(mtd)) {
112 DEBUG(0, "MTDSB: Device #%u doesn't appear to exist\n", mtdnr); 112 pr_debug("MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
113 return ERR_CAST(mtd); 113 return ERR_CAST(mtd);
114 } 114 }
115 115
@@ -132,7 +132,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
132 if (!dev_name) 132 if (!dev_name)
133 return ERR_PTR(-EINVAL); 133 return ERR_PTR(-EINVAL);
134 134
135 DEBUG(2, "MTDSB: dev_name \"%s\"\n", dev_name); 135 pr_debug("MTDSB: dev_name \"%s\"\n", dev_name);
136 136
137 /* the preferred way of mounting in future; especially when 137 /* the preferred way of mounting in future; especially when
138 * CONFIG_BLOCK=n - we specify the underlying MTD device by number or 138 * CONFIG_BLOCK=n - we specify the underlying MTD device by number or
@@ -143,7 +143,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
143 struct mtd_info *mtd; 143 struct mtd_info *mtd;
144 144
145 /* mount by MTD device name */ 145 /* mount by MTD device name */
146 DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n", 146 pr_debug("MTDSB: mtd:%%s, name \"%s\"\n",
147 dev_name + 4); 147 dev_name + 4);
148 148
149 mtd = get_mtd_device_nm(dev_name + 4); 149 mtd = get_mtd_device_nm(dev_name + 4);
@@ -164,7 +164,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
164 mtdnr = simple_strtoul(dev_name + 3, &endptr, 0); 164 mtdnr = simple_strtoul(dev_name + 3, &endptr, 0);
165 if (!*endptr) { 165 if (!*endptr) {
166 /* It was a valid number */ 166 /* It was a valid number */
167 DEBUG(1, "MTDSB: mtd%%d, mtdnr %d\n", 167 pr_debug("MTDSB: mtd%%d, mtdnr %d\n",
168 mtdnr); 168 mtdnr);
169 return mount_mtd_nr(fs_type, flags, 169 return mount_mtd_nr(fs_type, flags,
170 dev_name, data, 170 dev_name, data,
@@ -180,10 +180,10 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
180 bdev = lookup_bdev(dev_name); 180 bdev = lookup_bdev(dev_name);
181 if (IS_ERR(bdev)) { 181 if (IS_ERR(bdev)) {
182 ret = PTR_ERR(bdev); 182 ret = PTR_ERR(bdev);
183 DEBUG(1, "MTDSB: lookup_bdev() returned %d\n", ret); 183 pr_debug("MTDSB: lookup_bdev() returned %d\n", ret);
184 return ERR_PTR(ret); 184 return ERR_PTR(ret);
185 } 185 }
186 DEBUG(1, "MTDSB: lookup_bdev() returned 0\n"); 186 pr_debug("MTDSB: lookup_bdev() returned 0\n");
187 187
188 ret = -EINVAL; 188 ret = -EINVAL;
189 189
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index fd7885327611..bd9590c723e4 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -86,7 +86,7 @@ struct swap_eb {
86 unsigned int flags; 86 unsigned int flags;
87 unsigned int active_count; 87 unsigned int active_count;
88 unsigned int erase_count; 88 unsigned int erase_count;
89 unsigned int pad; /* speeds up pointer decremtnt */ 89 unsigned int pad; /* speeds up pointer decrement */
90}; 90};
91 91
92#define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \ 92#define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
@@ -314,7 +314,7 @@ static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
314{ 314{
315 int ret = d->mtd->read_oob(d->mtd, from, ops); 315 int ret = d->mtd->read_oob(d->mtd, from, ops);
316 316
317 if (ret == -EUCLEAN) 317 if (mtd_is_bitflip(ret))
318 return ret; 318 return ret;
319 319
320 if (ret) { 320 if (ret) {
@@ -350,11 +350,11 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
350 ops.oobbuf = d->oob_buf; 350 ops.oobbuf = d->oob_buf;
351 ops.ooboffs = 0; 351 ops.ooboffs = 0;
352 ops.datbuf = NULL; 352 ops.datbuf = NULL;
353 ops.mode = MTD_OOB_AUTO; 353 ops.mode = MTD_OPS_AUTO_OOB;
354 354
355 ret = mtdswap_read_oob(d, offset, &ops); 355 ret = mtdswap_read_oob(d, offset, &ops);
356 356
357 if (ret && ret != -EUCLEAN) 357 if (ret && !mtd_is_bitflip(ret))
358 return ret; 358 return ret;
359 359
360 data = (struct mtdswap_oobdata *)d->oob_buf; 360 data = (struct mtdswap_oobdata *)d->oob_buf;
@@ -363,7 +363,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
363 363
364 if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) { 364 if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
365 eb->erase_count = le32_to_cpu(data->count); 365 eb->erase_count = le32_to_cpu(data->count);
366 if (ret == -EUCLEAN) 366 if (mtd_is_bitflip(ret))
367 ret = MTDSWAP_SCANNED_BITFLIP; 367 ret = MTDSWAP_SCANNED_BITFLIP;
368 else { 368 else {
369 if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY) 369 if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)
@@ -389,7 +389,7 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
389 389
390 ops.ooboffs = 0; 390 ops.ooboffs = 0;
391 ops.oobbuf = (uint8_t *)&n; 391 ops.oobbuf = (uint8_t *)&n;
392 ops.mode = MTD_OOB_AUTO; 392 ops.mode = MTD_OPS_AUTO_OOB;
393 ops.datbuf = NULL; 393 ops.datbuf = NULL;
394 394
395 if (marker == MTDSWAP_TYPE_CLEAN) { 395 if (marker == MTDSWAP_TYPE_CLEAN) {
@@ -408,7 +408,7 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
408 if (ret) { 408 if (ret) {
409 dev_warn(d->dev, "Write OOB failed for block at %08llx " 409 dev_warn(d->dev, "Write OOB failed for block at %08llx "
410 "error %d\n", offset, ret); 410 "error %d\n", offset, ret);
411 if (ret == -EIO || ret == -EBADMSG) 411 if (ret == -EIO || mtd_is_eccerr(ret))
412 mtdswap_handle_write_error(d, eb); 412 mtdswap_handle_write_error(d, eb);
413 return ret; 413 return ret;
414 } 414 }
@@ -628,7 +628,7 @@ static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
628 TREE_COUNT(d, CLEAN)--; 628 TREE_COUNT(d, CLEAN)--;
629 629
630 ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY); 630 ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
631 } while (ret == -EIO || ret == -EBADMSG); 631 } while (ret == -EIO || mtd_is_eccerr(ret));
632 632
633 if (ret) 633 if (ret)
634 return ret; 634 return ret;
@@ -678,7 +678,7 @@ retry:
678 ret = mtdswap_map_free_block(d, page, bp); 678 ret = mtdswap_map_free_block(d, page, bp);
679 eb = d->eb_data + (*bp / d->pages_per_eblk); 679 eb = d->eb_data + (*bp / d->pages_per_eblk);
680 680
681 if (ret == -EIO || ret == -EBADMSG) { 681 if (ret == -EIO || mtd_is_eccerr(ret)) {
682 d->curr_write = NULL; 682 d->curr_write = NULL;
683 eb->active_count--; 683 eb->active_count--;
684 d->revmap[*bp] = PAGE_UNDEF; 684 d->revmap[*bp] = PAGE_UNDEF;
@@ -690,7 +690,7 @@ retry:
690 690
691 writepos = (loff_t)*bp << PAGE_SHIFT; 691 writepos = (loff_t)*bp << PAGE_SHIFT;
692 ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf); 692 ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf);
693 if (ret == -EIO || ret == -EBADMSG) { 693 if (ret == -EIO || mtd_is_eccerr(ret)) {
694 d->curr_write_pos--; 694 d->curr_write_pos--;
695 eb->active_count--; 695 eb->active_count--;
696 d->revmap[*bp] = PAGE_UNDEF; 696 d->revmap[*bp] = PAGE_UNDEF;
@@ -738,7 +738,7 @@ static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
738retry: 738retry:
739 ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf); 739 ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
740 740
741 if (ret < 0 && ret != -EUCLEAN) { 741 if (ret < 0 && !mtd_is_bitflip(ret)) {
742 oldeb = d->eb_data + oldblock / d->pages_per_eblk; 742 oldeb = d->eb_data + oldblock / d->pages_per_eblk;
743 oldeb->flags |= EBLOCK_READERR; 743 oldeb->flags |= EBLOCK_READERR;
744 744
@@ -931,7 +931,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
931 struct mtd_oob_ops ops; 931 struct mtd_oob_ops ops;
932 int ret; 932 int ret;
933 933
934 ops.mode = MTD_OOB_AUTO; 934 ops.mode = MTD_OPS_AUTO_OOB;
935 ops.len = mtd->writesize; 935 ops.len = mtd->writesize;
936 ops.ooblen = mtd->ecclayout->oobavail; 936 ops.ooblen = mtd->ecclayout->oobavail;
937 ops.ooboffs = 0; 937 ops.ooboffs = 0;
@@ -1016,7 +1016,7 @@ static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
1016 1016
1017 if (ret == 0) 1017 if (ret == 0)
1018 mtdswap_rb_add(d, eb, MTDSWAP_CLEAN); 1018 mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
1019 else if (ret != -EIO && ret != -EBADMSG) 1019 else if (ret != -EIO && !mtd_is_eccerr(ret))
1020 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); 1020 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
1021 1021
1022 return 0; 1022 return 0;
@@ -1164,7 +1164,7 @@ retry:
1164 ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf); 1164 ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf);
1165 1165
1166 d->mtd_read_count++; 1166 d->mtd_read_count++;
1167 if (ret == -EUCLEAN) { 1167 if (mtd_is_bitflip(ret)) {
1168 eb->flags |= EBLOCK_BITFLIP; 1168 eb->flags |= EBLOCK_BITFLIP;
1169 mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); 1169 mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
1170 ret = 0; 1170 ret = 0;
@@ -1374,11 +1374,10 @@ static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
1374 goto revmap_fail; 1374 goto revmap_fail;
1375 1375
1376 eblk_bytes = sizeof(struct swap_eb)*d->eblks; 1376 eblk_bytes = sizeof(struct swap_eb)*d->eblks;
1377 d->eb_data = vmalloc(eblk_bytes); 1377 d->eb_data = vzalloc(eblk_bytes);
1378 if (!d->eb_data) 1378 if (!d->eb_data)
1379 goto eb_data_fail; 1379 goto eb_data_fail;
1380 1380
1381 memset(d->eb_data, 0, eblk_bytes);
1382 for (i = 0; i < pages; i++) 1381 for (i = 0; i < pages; i++)
1383 d->page_data[i] = BLOCK_UNDEF; 1382 d->page_data[i] = BLOCK_UNDEF;
1384 1383
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index dbfa0f7fb464..cce7b70824c3 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -83,16 +83,9 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
83 scratch register here to enable this feature. On Intel Moorestown 83 scratch register here to enable this feature. On Intel Moorestown
84 boards, the scratch register is at 0xFF108018. 84 boards, the scratch register is at 0xFF108018.
85 85
86config MTD_NAND_EDB7312
87 tristate "Support for Cirrus Logic EBD7312 evaluation board"
88 depends on ARCH_EDB7312
89 help
90 This enables the driver for the Cirrus Logic EBD7312 evaluation
91 board to access the onboard NAND Flash.
92
93config MTD_NAND_H1900 86config MTD_NAND_H1900
94 tristate "iPAQ H1900 flash" 87 tristate "iPAQ H1900 flash"
95 depends on ARCH_PXA 88 depends on ARCH_PXA && BROKEN
96 help 89 help
97 This enables the driver for the iPAQ h1900 flash. 90 This enables the driver for the iPAQ h1900 flash.
98 91
@@ -116,10 +109,11 @@ config MTD_NAND_AMS_DELTA
116 Support for NAND flash on Amstrad E3 (Delta). 109 Support for NAND flash on Amstrad E3 (Delta).
117 110
118config MTD_NAND_OMAP2 111config MTD_NAND_OMAP2
119 tristate "NAND Flash device on OMAP2 and OMAP3" 112 tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4"
120 depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3) 113 depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4)
121 help 114 help
122 Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms. 115 Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
116 platforms.
123 117
124config MTD_NAND_IDS 118config MTD_NAND_IDS
125 tristate 119 tristate
@@ -423,6 +417,19 @@ config MTD_NAND_NANDSIM
423 The simulator may simulate various NAND flash chips for the 417 The simulator may simulate various NAND flash chips for the
424 MTD nand layer. 418 MTD nand layer.
425 419
420config MTD_NAND_GPMI_NAND
421 bool "GPMI NAND Flash Controller driver"
422 depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
423 select MTD_PARTITIONS
424 select MTD_CMDLINE_PARTS
425 help
426 Enables NAND Flash support for IMX23 or IMX28.
427 The GPMI controller is very powerful, with the help of BCH
428 module, it can do the hardware ECC. The GPMI supports several
429 NAND flashs at the same time. The GPMI may conflicts with other
430 block, such as SD card. So pay attention to it when you enable
431 the GPMI.
432
426config MTD_NAND_PLATFORM 433config MTD_NAND_PLATFORM
427 tristate "Support for generic platform NAND driver" 434 tristate "Support for generic platform NAND driver"
428 help 435 help
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 5745d831168e..618f4ba23699 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
13obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o 13obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
14obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o 14obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
15obj-$(CONFIG_MTD_NAND_DENALI) += denali.o 15obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
16obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
17obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o 16obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
18obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o 17obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
19obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o 18obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
@@ -49,5 +48,6 @@ obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
49obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o 48obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
50obj-$(CONFIG_MTD_NAND_RICOH) += r852.o 49obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
51obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o 50obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
51obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
52 52
53nand-objs := nand_base.o nand_bbt.o 53nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 55da20ccc7a8..23e5d77c39fc 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -161,37 +161,6 @@ static int atmel_nand_device_ready(struct mtd_info *mtd)
161 !!host->board->rdy_pin_active_low; 161 !!host->board->rdy_pin_active_low;
162} 162}
163 163
164/*
165 * Minimal-overhead PIO for data access.
166 */
167static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
168{
169 struct nand_chip *nand_chip = mtd->priv;
170
171 __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
172}
173
174static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
175{
176 struct nand_chip *nand_chip = mtd->priv;
177
178 __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
179}
180
181static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
182{
183 struct nand_chip *nand_chip = mtd->priv;
184
185 __raw_writesb(nand_chip->IO_ADDR_W, buf, len);
186}
187
188static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
189{
190 struct nand_chip *nand_chip = mtd->priv;
191
192 __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
193}
194
195static void dma_complete_func(void *completion) 164static void dma_complete_func(void *completion)
196{ 165{
197 complete(completion); 166 complete(completion);
@@ -266,33 +235,27 @@ err_buf:
266static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) 235static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
267{ 236{
268 struct nand_chip *chip = mtd->priv; 237 struct nand_chip *chip = mtd->priv;
269 struct atmel_nand_host *host = chip->priv;
270 238
271 if (use_dma && len > mtd->oobsize) 239 if (use_dma && len > mtd->oobsize)
272 /* only use DMA for bigger than oob size: better performances */ 240 /* only use DMA for bigger than oob size: better performances */
273 if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) 241 if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
274 return; 242 return;
275 243
276 if (host->board->bus_width_16) 244 /* if no DMA operation possible, use PIO */
277 atmel_read_buf16(mtd, buf, len); 245 memcpy_fromio(buf, chip->IO_ADDR_R, len);
278 else
279 atmel_read_buf8(mtd, buf, len);
280} 246}
281 247
282static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) 248static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
283{ 249{
284 struct nand_chip *chip = mtd->priv; 250 struct nand_chip *chip = mtd->priv;
285 struct atmel_nand_host *host = chip->priv;
286 251
287 if (use_dma && len > mtd->oobsize) 252 if (use_dma && len > mtd->oobsize)
288 /* only use DMA for bigger than oob size: better performances */ 253 /* only use DMA for bigger than oob size: better performances */
289 if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) 254 if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
290 return; 255 return;
291 256
292 if (host->board->bus_width_16) 257 /* if no DMA operation possible, use PIO */
293 atmel_write_buf16(mtd, buf, len); 258 memcpy_toio(chip->IO_ADDR_W, buf, len);
294 else
295 atmel_write_buf8(mtd, buf, len);
296} 259}
297 260
298/* 261/*
@@ -481,10 +444,6 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
481 } 444 }
482} 445}
483 446
484#ifdef CONFIG_MTD_CMDLINE_PARTS
485static const char *part_probes[] = { "cmdlinepart", NULL };
486#endif
487
488/* 447/*
489 * Probe for the NAND device. 448 * Probe for the NAND device.
490 */ 449 */
@@ -496,8 +455,6 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
496 struct resource *regs; 455 struct resource *regs;
497 struct resource *mem; 456 struct resource *mem;
498 int res; 457 int res;
499 struct mtd_partition *partitions = NULL;
500 int num_partitions = 0;
501 458
502 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 459 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
503 if (!mem) { 460 if (!mem) {
@@ -583,7 +540,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
583 540
584 if (on_flash_bbt) { 541 if (on_flash_bbt) {
585 printk(KERN_INFO "atmel_nand: Use On Flash BBT\n"); 542 printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
586 nand_chip->options |= NAND_USE_FLASH_BBT; 543 nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
587 } 544 }
588 545
589 if (!cpu_has_dma()) 546 if (!cpu_has_dma())
@@ -594,7 +551,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
594 551
595 dma_cap_zero(mask); 552 dma_cap_zero(mask);
596 dma_cap_set(DMA_MEMCPY, mask); 553 dma_cap_set(DMA_MEMCPY, mask);
597 host->dma_chan = dma_request_channel(mask, 0, NULL); 554 host->dma_chan = dma_request_channel(mask, NULL, NULL);
598 if (!host->dma_chan) { 555 if (!host->dma_chan) {
599 dev_err(host->dev, "Failed to request DMA channel\n"); 556 dev_err(host->dev, "Failed to request DMA channel\n");
600 use_dma = 0; 557 use_dma = 0;
@@ -655,27 +612,12 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
655 goto err_scan_tail; 612 goto err_scan_tail;
656 } 613 }
657 614
658#ifdef CONFIG_MTD_CMDLINE_PARTS
659 mtd->name = "atmel_nand"; 615 mtd->name = "atmel_nand";
660 num_partitions = parse_mtd_partitions(mtd, part_probes, 616 res = mtd_device_parse_register(mtd, NULL, 0,
661 &partitions, 0); 617 host->board->parts, host->board->num_parts);
662#endif
663 if (num_partitions <= 0 && host->board->partition_info)
664 partitions = host->board->partition_info(mtd->size,
665 &num_partitions);
666
667 if ((!partitions) || (num_partitions == 0)) {
668 printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
669 res = -ENXIO;
670 goto err_no_partitions;
671 }
672
673 res = mtd_device_register(mtd, partitions, num_partitions);
674 if (!res) 618 if (!res)
675 return res; 619 return res;
676 620
677err_no_partitions:
678 nand_release(mtd);
679err_scan_tail: 621err_scan_tail:
680err_scan_ident: 622err_scan_ident:
681err_no_card: 623err_no_card:
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index fa5736b9286c..7dd3700f2303 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -52,7 +52,7 @@ static const struct mtd_partition partition_info[] = {
52 * au_read_byte - read one byte from the chip 52 * au_read_byte - read one byte from the chip
53 * @mtd: MTD device structure 53 * @mtd: MTD device structure
54 * 54 *
55 * read function for 8bit buswith 55 * read function for 8bit buswidth
56 */ 56 */
57static u_char au_read_byte(struct mtd_info *mtd) 57static u_char au_read_byte(struct mtd_info *mtd)
58{ 58{
@@ -67,7 +67,7 @@ static u_char au_read_byte(struct mtd_info *mtd)
67 * @mtd: MTD device structure 67 * @mtd: MTD device structure
68 * @byte: pointer to data byte to write 68 * @byte: pointer to data byte to write
69 * 69 *
70 * write function for 8it buswith 70 * write function for 8it buswidth
71 */ 71 */
72static void au_write_byte(struct mtd_info *mtd, u_char byte) 72static void au_write_byte(struct mtd_info *mtd, u_char byte)
73{ 73{
@@ -77,11 +77,10 @@ static void au_write_byte(struct mtd_info *mtd, u_char byte)
77} 77}
78 78
79/** 79/**
80 * au_read_byte16 - read one byte endianess aware from the chip 80 * au_read_byte16 - read one byte endianness aware from the chip
81 * @mtd: MTD device structure 81 * @mtd: MTD device structure
82 * 82 *
83 * read function for 16bit buswith with 83 * read function for 16bit buswidth with endianness conversion
84 * endianess conversion
85 */ 84 */
86static u_char au_read_byte16(struct mtd_info *mtd) 85static u_char au_read_byte16(struct mtd_info *mtd)
87{ 86{
@@ -92,12 +91,11 @@ static u_char au_read_byte16(struct mtd_info *mtd)
92} 91}
93 92
94/** 93/**
95 * au_write_byte16 - write one byte endianess aware to the chip 94 * au_write_byte16 - write one byte endianness aware to the chip
96 * @mtd: MTD device structure 95 * @mtd: MTD device structure
97 * @byte: pointer to data byte to write 96 * @byte: pointer to data byte to write
98 * 97 *
99 * write function for 16bit buswith with 98 * write function for 16bit buswidth with endianness conversion
100 * endianess conversion
101 */ 99 */
102static void au_write_byte16(struct mtd_info *mtd, u_char byte) 100static void au_write_byte16(struct mtd_info *mtd, u_char byte)
103{ 101{
@@ -110,8 +108,7 @@ static void au_write_byte16(struct mtd_info *mtd, u_char byte)
110 * au_read_word - read one word from the chip 108 * au_read_word - read one word from the chip
111 * @mtd: MTD device structure 109 * @mtd: MTD device structure
112 * 110 *
113 * read function for 16bit buswith without 111 * read function for 16bit buswidth without endianness conversion
114 * endianess conversion
115 */ 112 */
116static u16 au_read_word(struct mtd_info *mtd) 113static u16 au_read_word(struct mtd_info *mtd)
117{ 114{
@@ -127,7 +124,7 @@ static u16 au_read_word(struct mtd_info *mtd)
127 * @buf: data buffer 124 * @buf: data buffer
128 * @len: number of bytes to write 125 * @len: number of bytes to write
129 * 126 *
130 * write function for 8bit buswith 127 * write function for 8bit buswidth
131 */ 128 */
132static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 129static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
133{ 130{
@@ -146,7 +143,7 @@ static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
146 * @buf: buffer to store date 143 * @buf: buffer to store date
147 * @len: number of bytes to read 144 * @len: number of bytes to read
148 * 145 *
149 * read function for 8bit buswith 146 * read function for 8bit buswidth
150 */ 147 */
151static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len) 148static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
152{ 149{
@@ -165,7 +162,7 @@ static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
165 * @buf: buffer containing the data to compare 162 * @buf: buffer containing the data to compare
166 * @len: number of bytes to compare 163 * @len: number of bytes to compare
167 * 164 *
168 * verify function for 8bit buswith 165 * verify function for 8bit buswidth
169 */ 166 */
170static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) 167static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
171{ 168{
@@ -187,7 +184,7 @@ static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
187 * @buf: data buffer 184 * @buf: data buffer
188 * @len: number of bytes to write 185 * @len: number of bytes to write
189 * 186 *
190 * write function for 16bit buswith 187 * write function for 16bit buswidth
191 */ 188 */
192static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len) 189static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
193{ 190{
@@ -209,7 +206,7 @@ static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
209 * @buf: buffer to store date 206 * @buf: buffer to store date
210 * @len: number of bytes to read 207 * @len: number of bytes to read
211 * 208 *
212 * read function for 16bit buswith 209 * read function for 16bit buswidth
213 */ 210 */
214static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len) 211static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
215{ 212{
@@ -230,7 +227,7 @@ static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
230 * @buf: buffer containing the data to compare 227 * @buf: buffer containing the data to compare
231 * @len: number of bytes to compare 228 * @len: number of bytes to compare
232 * 229 *
233 * verify function for 16bit buswith 230 * verify function for 16bit buswidth
234 */ 231 */
235static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len) 232static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
236{ 233{
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
index eddc9a224985..2e42ec2e8ff4 100644
--- a/drivers/mtd/nand/autcpu12.c
+++ b/drivers/mtd/nand/autcpu12.c
@@ -172,9 +172,9 @@ static int __init autcpu12_init(void)
172 172
173 /* Enable the following for a flash based bad block table */ 173 /* Enable the following for a flash based bad block table */
174 /* 174 /*
175 this->options = NAND_USE_FLASH_BBT; 175 this->bbt_options = NAND_BBT_USE_FLASH;
176 */ 176 */
177 this->options = NAND_USE_FLASH_BBT; 177 this->bbt_options = NAND_BBT_USE_FLASH;
178 178
179 /* Scan to find existence of the device */ 179 /* Scan to find existence of the device */
180 if (nand_scan(autcpu12_mtd, 1)) { 180 if (nand_scan(autcpu12_mtd, 1)) {
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 8c569e454dc5..46b58d672847 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -52,8 +52,6 @@
52static const __devinitconst char gBanner[] = KERN_INFO \ 52static const __devinitconst char gBanner[] = KERN_INFO \
53 "BCM UMI MTD NAND Driver: 1.00\n"; 53 "BCM UMI MTD NAND Driver: 1.00\n";
54 54
55const char *part_probes[] = { "cmdlinepart", NULL };
56
57#if NAND_ECC_BCH 55#if NAND_ECC_BCH
58static uint8_t scan_ff_pattern[] = { 0xff }; 56static uint8_t scan_ff_pattern[] = { 0xff };
59 57
@@ -376,16 +374,18 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
376 374
377 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 375 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
378 376
379 if (!r) 377 if (!r) {
380 return -ENXIO; 378 err = -ENXIO;
379 goto out_free;
380 }
381 381
382 /* map physical address */ 382 /* map physical address */
383 bcm_umi_io_base = ioremap(r->start, resource_size(r)); 383 bcm_umi_io_base = ioremap(r->start, resource_size(r));
384 384
385 if (!bcm_umi_io_base) { 385 if (!bcm_umi_io_base) {
386 printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n"); 386 printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n");
387 kfree(board_mtd); 387 err = -EIO;
388 return -EIO; 388 goto out_free;
389 } 389 }
390 390
391 /* Get pointer to private data */ 391 /* Get pointer to private data */
@@ -401,9 +401,8 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
401 /* Initialize the NAND hardware. */ 401 /* Initialize the NAND hardware. */
402 if (bcm_umi_nand_inithw() < 0) { 402 if (bcm_umi_nand_inithw() < 0) {
403 printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n"); 403 printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n");
404 iounmap(bcm_umi_io_base); 404 err = -EIO;
405 kfree(board_mtd); 405 goto out_unmap;
406 return -EIO;
407 } 406 }
408 407
409 /* Set address of NAND IO lines */ 408 /* Set address of NAND IO lines */
@@ -436,7 +435,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
436#if USE_DMA 435#if USE_DMA
437 err = nand_dma_init(); 436 err = nand_dma_init();
438 if (err != 0) 437 if (err != 0)
439 return err; 438 goto out_unmap;
440#endif 439#endif
441 440
442 /* Figure out the size of the device that we have. 441 /* Figure out the size of the device that we have.
@@ -447,9 +446,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
447 err = nand_scan_ident(board_mtd, 1, NULL); 446 err = nand_scan_ident(board_mtd, 1, NULL);
448 if (err) { 447 if (err) {
449 printk(KERN_ERR "nand_scan failed: %d\n", err); 448 printk(KERN_ERR "nand_scan failed: %d\n", err);
450 iounmap(bcm_umi_io_base); 449 goto out_unmap;
451 kfree(board_mtd);
452 return err;
453 } 450 }
454 451
455 /* Now that we know the nand size, we can setup the ECC layout */ 452 /* Now that we know the nand size, we can setup the ECC layout */
@@ -468,13 +465,14 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
468 { 465 {
469 printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n", 466 printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n",
470 board_mtd->writesize); 467 board_mtd->writesize);
471 return -EINVAL; 468 err = -EINVAL;
469 goto out_unmap;
472 } 470 }
473 } 471 }
474 472
475#if NAND_ECC_BCH 473#if NAND_ECC_BCH
476 if (board_mtd->writesize > 512) { 474 if (board_mtd->writesize > 512) {
477 if (this->options & NAND_USE_FLASH_BBT) 475 if (this->bbt_options & NAND_BBT_USE_FLASH)
478 largepage_bbt.options = NAND_BBT_SCAN2NDPAGE; 476 largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
479 this->badblock_pattern = &largepage_bbt; 477 this->badblock_pattern = &largepage_bbt;
480 } 478 }
@@ -485,33 +483,20 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
485 err = nand_scan_tail(board_mtd); 483 err = nand_scan_tail(board_mtd);
486 if (err) { 484 if (err) {
487 printk(KERN_ERR "nand_scan failed: %d\n", err); 485 printk(KERN_ERR "nand_scan failed: %d\n", err);
488 iounmap(bcm_umi_io_base); 486 goto out_unmap;
489 kfree(board_mtd);
490 return err;
491 } 487 }
492 488
493 /* Register the partitions */ 489 /* Register the partitions */
494 { 490 board_mtd->name = "bcm_umi-nand";
495 int nr_partitions; 491 mtd_device_parse_register(board_mtd, NULL, 0, NULL, 0);
496 struct mtd_partition *partition_info;
497
498 board_mtd->name = "bcm_umi-nand";
499 nr_partitions =
500 parse_mtd_partitions(board_mtd, part_probes,
501 &partition_info, 0);
502
503 if (nr_partitions <= 0) {
504 printk(KERN_ERR "BCM UMI NAND: Too few partitions - %d\n",
505 nr_partitions);
506 iounmap(bcm_umi_io_base);
507 kfree(board_mtd);
508 return -EIO;
509 }
510 mtd_device_register(board_mtd, partition_info, nr_partitions);
511 }
512 492
513 /* Return happy */ 493 /* Return happy */
514 return 0; 494 return 0;
495out_unmap:
496 iounmap(bcm_umi_io_base);
497out_free:
498 kfree(board_mtd);
499 return err;
515} 500}
516 501
517static int bcm_umi_nand_remove(struct platform_device *pdev) 502static int bcm_umi_nand_remove(struct platform_device *pdev)
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 7c8df837d3b8..72d3f23490c5 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -58,7 +58,6 @@
58 58
59struct cafe_priv { 59struct cafe_priv {
60 struct nand_chip nand; 60 struct nand_chip nand;
61 struct mtd_partition *parts;
62 struct pci_dev *pdev; 61 struct pci_dev *pdev;
63 void __iomem *mmio; 62 void __iomem *mmio;
64 struct rs_control *rs; 63 struct rs_control *rs;
@@ -372,7 +371,7 @@ static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
372 return 1; 371 return 1;
373} 372}
374/** 373/**
375 * cafe_nand_read_page_syndrome - {REPLACABLE] hardware ecc syndrom based page read 374 * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
376 * @mtd: mtd info structure 375 * @mtd: mtd info structure
377 * @chip: nand chip info structure 376 * @chip: nand chip info structure
378 * @buf: buffer to store read data 377 * @buf: buffer to store read data
@@ -631,8 +630,6 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
631 struct cafe_priv *cafe; 630 struct cafe_priv *cafe;
632 uint32_t ctrl; 631 uint32_t ctrl;
633 int err = 0; 632 int err = 0;
634 struct mtd_partition *parts;
635 int nr_parts;
636 633
637 /* Very old versions shared the same PCI ident for all three 634 /* Very old versions shared the same PCI ident for all three
638 functions on the chip. Verify the class too... */ 635 functions on the chip. Verify the class too... */
@@ -687,7 +684,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
687 cafe->nand.chip_delay = 0; 684 cafe->nand.chip_delay = 0;
688 685
689 /* Enable the following for a flash based bad block table */ 686 /* Enable the following for a flash based bad block table */
690 cafe->nand.options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR | NAND_OWN_BUFFERS; 687 cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
688 cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
691 689
692 if (skipbbt) { 690 if (skipbbt) {
693 cafe->nand.options |= NAND_SKIP_BBTSCAN; 691 cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -800,18 +798,9 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
800 798
801 pci_set_drvdata(pdev, mtd); 799 pci_set_drvdata(pdev, mtd);
802 800
803 /* We register the whole device first, separate from the partitions */
804 mtd_device_register(mtd, NULL, 0);
805
806#ifdef CONFIG_MTD_CMDLINE_PARTS
807 mtd->name = "cafe_nand"; 801 mtd->name = "cafe_nand";
808#endif 802 mtd_device_parse_register(mtd, part_probes, 0, NULL, 0);
809 nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0); 803
810 if (nr_parts > 0) {
811 cafe->parts = parts;
812 dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts);
813 mtd_device_register(mtd, parts, nr_parts);
814 }
815 goto out; 804 goto out;
816 805
817 out_irq: 806 out_irq:
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index be33b0f4634d..737ef9a04fdb 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -51,8 +51,6 @@ static struct mtd_partition partition_info[] = {
51}; 51};
52#define NUM_PARTITIONS (ARRAY_SIZE(partition_info)) 52#define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
53 53
54const char *part_probes[] = { "cmdlinepart", NULL };
55
56static u_char cmx270_read_byte(struct mtd_info *mtd) 54static u_char cmx270_read_byte(struct mtd_info *mtd)
57{ 55{
58 struct nand_chip *this = mtd->priv; 56 struct nand_chip *this = mtd->priv;
@@ -152,9 +150,6 @@ static int cmx270_device_ready(struct mtd_info *mtd)
152static int __init cmx270_init(void) 150static int __init cmx270_init(void)
153{ 151{
154 struct nand_chip *this; 152 struct nand_chip *this;
155 const char *part_type;
156 struct mtd_partition *mtd_parts;
157 int mtd_parts_nb = 0;
158 int ret; 153 int ret;
159 154
160 if (!(machine_is_armcore() && cpu_is_pxa27x())) 155 if (!(machine_is_armcore() && cpu_is_pxa27x()))
@@ -223,23 +218,9 @@ static int __init cmx270_init(void)
223 goto err_scan; 218 goto err_scan;
224 } 219 }
225 220
226#ifdef CONFIG_MTD_CMDLINE_PARTS
227 mtd_parts_nb = parse_mtd_partitions(cmx270_nand_mtd, part_probes,
228 &mtd_parts, 0);
229 if (mtd_parts_nb > 0)
230 part_type = "command line";
231 else
232 mtd_parts_nb = 0;
233#endif
234 if (!mtd_parts_nb) {
235 mtd_parts = partition_info;
236 mtd_parts_nb = NUM_PARTITIONS;
237 part_type = "static";
238 }
239
240 /* Register the partitions */ 221 /* Register the partitions */
241 pr_notice("Using %s partition definition\n", part_type); 222 ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, 0,
242 ret = mtd_device_register(cmx270_nand_mtd, mtd_parts, mtd_parts_nb); 223 partition_info, NUM_PARTITIONS);
243 if (ret) 224 if (ret)
244 goto err_scan; 225 goto err_scan;
245 226
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index f59ad1f2d5db..414afa793563 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -239,7 +239,8 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
239 this->ecc.correct = nand_correct_data; 239 this->ecc.correct = nand_correct_data;
240 240
241 /* Enable the following for a flash based bad block table */ 241 /* Enable the following for a flash based bad block table */
242 this->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR; 242 this->bbt_options = NAND_BBT_USE_FLASH;
243 this->options = NAND_NO_AUTOINCR;
243 244
244 /* Scan to find existence of the device */ 245 /* Scan to find existence of the device */
245 if (nand_scan(new_mtd, 1)) { 246 if (nand_scan(new_mtd, 1)) {
@@ -277,15 +278,11 @@ static int is_geode(void)
277 return 0; 278 return 0;
278} 279}
279 280
280static const char *part_probes[] = { "cmdlinepart", NULL };
281
282static int __init cs553x_init(void) 281static int __init cs553x_init(void)
283{ 282{
284 int err = -ENXIO; 283 int err = -ENXIO;
285 int i; 284 int i;
286 uint64_t val; 285 uint64_t val;
287 int mtd_parts_nb = 0;
288 struct mtd_partition *mtd_parts = NULL;
289 286
290 /* If the CPU isn't a Geode GX or LX, abort */ 287 /* If the CPU isn't a Geode GX or LX, abort */
291 if (!is_geode()) 288 if (!is_geode())
@@ -315,13 +312,9 @@ static int __init cs553x_init(void)
315 do mtdconcat etc. if we want to. */ 312 do mtdconcat etc. if we want to. */
316 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { 313 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
317 if (cs553x_mtd[i]) { 314 if (cs553x_mtd[i]) {
318
319 /* If any devices registered, return success. Else the last error. */ 315 /* If any devices registered, return success. Else the last error. */
320 mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0); 316 mtd_device_parse_register(cs553x_mtd[i], NULL, 0,
321 if (mtd_parts_nb > 0) 317 NULL, 0);
322 printk(KERN_NOTICE "Using command line partition definition\n");
323 mtd_device_register(cs553x_mtd[i], mtd_parts,
324 mtd_parts_nb);
325 err = 0; 318 err = 0;
326 } 319 }
327 } 320 }
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 1f34951ae1a7..c153e1f77f90 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -57,7 +57,6 @@ struct davinci_nand_info {
57 57
58 struct device *dev; 58 struct device *dev;
59 struct clk *clk; 59 struct clk *clk;
60 bool partitioned;
61 60
62 bool is_readmode; 61 bool is_readmode;
63 62
@@ -530,8 +529,6 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
530 int ret; 529 int ret;
531 uint32_t val; 530 uint32_t val;
532 nand_ecc_modes_t ecc_mode; 531 nand_ecc_modes_t ecc_mode;
533 struct mtd_partition *mtd_parts = NULL;
534 int mtd_parts_nb = 0;
535 532
536 /* insist on board-specific configuration */ 533 /* insist on board-specific configuration */
537 if (!pdata) 534 if (!pdata)
@@ -581,7 +578,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
581 info->chip.chip_delay = 0; 578 info->chip.chip_delay = 0;
582 info->chip.select_chip = nand_davinci_select_chip; 579 info->chip.select_chip = nand_davinci_select_chip;
583 580
584 /* options such as NAND_USE_FLASH_BBT or 16-bit widths */ 581 /* options such as NAND_BBT_USE_FLASH */
582 info->chip.bbt_options = pdata->bbt_options;
583 /* options such as 16-bit widths */
585 info->chip.options = pdata->options; 584 info->chip.options = pdata->options;
586 info->chip.bbt_td = pdata->bbt_td; 585 info->chip.bbt_td = pdata->bbt_td;
587 info->chip.bbt_md = pdata->bbt_md; 586 info->chip.bbt_md = pdata->bbt_md;
@@ -751,33 +750,8 @@ syndrome_done:
751 if (ret < 0) 750 if (ret < 0)
752 goto err_scan; 751 goto err_scan;
753 752
754 if (mtd_has_cmdlinepart()) { 753 ret = mtd_device_parse_register(&info->mtd, NULL, 0,
755 static const char *probes[] __initconst = { 754 pdata->parts, pdata->nr_parts);
756 "cmdlinepart", NULL
757 };
758
759 mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
760 &mtd_parts, 0);
761 }
762
763 if (mtd_parts_nb <= 0) {
764 mtd_parts = pdata->parts;
765 mtd_parts_nb = pdata->nr_parts;
766 }
767
768 /* Register any partitions */
769 if (mtd_parts_nb > 0) {
770 ret = mtd_device_register(&info->mtd, mtd_parts,
771 mtd_parts_nb);
772 if (ret == 0)
773 info->partitioned = true;
774 }
775
776 /* If there's no partition info, just package the whole chip
777 * as a single MTD device.
778 */
779 if (!info->partitioned)
780 ret = mtd_device_register(&info->mtd, NULL, 0) ? -ENODEV : 0;
781 755
782 if (ret < 0) 756 if (ret < 0)
783 goto err_scan; 757 goto err_scan;
@@ -816,9 +790,6 @@ err_nomem:
816static int __exit nand_davinci_remove(struct platform_device *pdev) 790static int __exit nand_davinci_remove(struct platform_device *pdev)
817{ 791{
818 struct davinci_nand_info *info = platform_get_drvdata(pdev); 792 struct davinci_nand_info *info = platform_get_drvdata(pdev);
819 int status;
820
821 status = mtd_device_unregister(&info->mtd);
822 793
823 spin_lock_irq(&davinci_nand_lock); 794 spin_lock_irq(&davinci_nand_lock);
824 if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME) 795 if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index d5276218945f..3984d488f9ab 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1346,6 +1346,7 @@ static void denali_hw_init(struct denali_nand_info *denali)
1346 * */ 1346 * */
1347 denali->bbtskipbytes = ioread32(denali->flash_reg + 1347 denali->bbtskipbytes = ioread32(denali->flash_reg +
1348 SPARE_AREA_SKIP_BYTES); 1348 SPARE_AREA_SKIP_BYTES);
1349 detect_max_banks(denali);
1349 denali_nand_reset(denali); 1350 denali_nand_reset(denali);
1350 iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED); 1351 iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
1351 iowrite32(CHIP_EN_DONT_CARE__FLAG, 1352 iowrite32(CHIP_EN_DONT_CARE__FLAG,
@@ -1356,7 +1357,6 @@ static void denali_hw_init(struct denali_nand_info *denali)
1356 /* Should set value for these registers when init */ 1357 /* Should set value for these registers when init */
1357 iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); 1358 iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
1358 iowrite32(1, denali->flash_reg + ECC_ENABLE); 1359 iowrite32(1, denali->flash_reg + ECC_ENABLE);
1359 detect_max_banks(denali);
1360 denali_nand_timing_set(denali); 1360 denali_nand_timing_set(denali);
1361 denali_irq_init(denali); 1361 denali_irq_init(denali);
1362} 1362}
@@ -1577,7 +1577,8 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1577 denali->nand.bbt_md = &bbt_mirror_descr; 1577 denali->nand.bbt_md = &bbt_mirror_descr;
1578 1578
1579 /* skip the scan for now until we have OOB read and write support */ 1579 /* skip the scan for now until we have OOB read and write support */
1580 denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN; 1580 denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
1581 denali->nand.options |= NAND_SKIP_BBTSCAN;
1581 denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; 1582 denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
1582 1583
1583 /* Denali Controller only support 15bit and 8bit ECC in MRST, 1584 /* Denali Controller only support 15bit and 8bit ECC in MRST,
@@ -1676,7 +1677,6 @@ static void denali_pci_remove(struct pci_dev *dev)
1676 struct denali_nand_info *denali = pci_get_drvdata(dev); 1677 struct denali_nand_info *denali = pci_get_drvdata(dev);
1677 1678
1678 nand_release(&denali->mtd); 1679 nand_release(&denali->mtd);
1679 mtd_device_unregister(&denali->mtd);
1680 1680
1681 denali_irq_cleanup(dev->irq, denali); 1681 denali_irq_cleanup(dev->irq, denali);
1682 1682
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index e1b84cb90f0d..5780dbab6113 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -133,7 +133,7 @@ static struct rs_control *rs_decoder;
133 133
134/* 134/*
135 * The HW decoder in the DoC ASIC's provides us a error syndrome, 135 * The HW decoder in the DoC ASIC's provides us a error syndrome,
136 * which we must convert to a standard syndrom usable by the generic 136 * which we must convert to a standard syndrome usable by the generic
137 * Reed-Solomon library code. 137 * Reed-Solomon library code.
138 * 138 *
139 * Fabrice Bellard figured this out in the old docecc code. I added 139 * Fabrice Bellard figured this out in the old docecc code. I added
@@ -154,7 +154,7 @@ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
154 ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2); 154 ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
155 parity = ecc[1]; 155 parity = ecc[1];
156 156
157 /* Initialize the syndrom buffer */ 157 /* Initialize the syndrome buffer */
158 for (i = 0; i < NROOTS; i++) 158 for (i = 0; i < NROOTS; i++)
159 s[i] = ds[0]; 159 s[i] = ds[0];
160 /* 160 /*
@@ -1032,7 +1032,7 @@ static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
1032 WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf); 1032 WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
1033 else 1033 else
1034 WriteDOC(DOC_ECC_DIS, docptr, ECCConf); 1034 WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
1035 if (no_ecc_failures && (ret == -EBADMSG)) { 1035 if (no_ecc_failures && mtd_is_eccerr(ret)) {
1036 printk(KERN_ERR "suppressing ECC failure\n"); 1036 printk(KERN_ERR "suppressing ECC failure\n");
1037 ret = 0; 1037 ret = 0;
1038 } 1038 }
@@ -1653,7 +1653,7 @@ static int __init doc_probe(unsigned long physadr)
1653 nand->ecc.mode = NAND_ECC_HW_SYNDROME; 1653 nand->ecc.mode = NAND_ECC_HW_SYNDROME;
1654 nand->ecc.size = 512; 1654 nand->ecc.size = 512;
1655 nand->ecc.bytes = 6; 1655 nand->ecc.bytes = 6;
1656 nand->options = NAND_USE_FLASH_BBT; 1656 nand->bbt_options = NAND_BBT_USE_FLASH;
1657 1657
1658 doc->physadr = physadr; 1658 doc->physadr = physadr;
1659 doc->virtadr = virtadr; 1659 doc->virtadr = virtadr;
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
deleted file mode 100644
index 8400d0f6dada..000000000000
--- a/drivers/mtd/nand/edb7312.c
+++ /dev/null
@@ -1,203 +0,0 @@
1/*
2 * drivers/mtd/nand/edb7312.c
3 *
4 * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
5 *
6 * Derived from drivers/mtd/nand/autcpu12.c
7 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * Overview:
14 * This is a device driver for the NAND flash device found on the
15 * CLEP7312 board which utilizes the Toshiba TC58V64AFT part. This is
16 * a 64Mibit (8MiB x 8 bits) NAND flash device.
17 */
18
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/mtd/mtd.h>
23#include <linux/mtd/nand.h>
24#include <linux/mtd/partitions.h>
25#include <asm/io.h>
26#include <mach/hardware.h> /* for CLPS7111_VIRT_BASE */
27#include <asm/sizes.h>
28#include <asm/hardware/clps7111.h>
29
30/*
31 * MTD structure for EDB7312 board
32 */
33static struct mtd_info *ep7312_mtd = NULL;
34
35/*
36 * Values specific to the EDB7312 board (used with EP7312 processor)
37 */
38#define EP7312_FIO_PBASE 0x10000000 /* Phys address of flash */
39#define EP7312_PXDR 0x0001 /*
40 * IO offset to Port B data register
41 * where the CLE, ALE and NCE pins
42 * are wired to.
43 */
44#define EP7312_PXDDR 0x0041 /*
45 * IO offset to Port B data direction
46 * register so we can control the IO
47 * lines.
48 */
49
50/*
51 * Module stuff
52 */
53
54static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE;
55static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR;
56static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR;
57
58/*
59 * Define static partitions for flash device
60 */
61static struct mtd_partition partition_info[] = {
62 {.name = "EP7312 Nand Flash",
63 .offset = 0,
64 .size = 8 * 1024 * 1024}
65};
66
67#define NUM_PARTITIONS 1
68
69/*
70 * hardware specific access to control-lines
71 *
72 * NAND_NCE: bit 0 -> bit 6 (bit 7 = 1)
73 * NAND_CLE: bit 1 -> bit 4
74 * NAND_ALE: bit 2 -> bit 5
75 */
76static void ep7312_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
77{
78 struct nand_chip *chip = mtd->priv;
79
80 if (ctrl & NAND_CTRL_CHANGE) {
81 unsigned char bits = 0x80;
82
83 bits |= (ctrl & (NAND_CLE | NAND_ALE)) << 3;
84 bits |= (ctrl & NAND_NCE) ? 0x00 : 0x40;
85
86 clps_writeb((clps_readb(ep7312_pxdr) & 0xF0) | bits,
87 ep7312_pxdr);
88 }
89 if (cmd != NAND_CMD_NONE)
90 writeb(cmd, chip->IO_ADDR_W);
91}
92
93/*
94 * read device ready pin
95 */
96static int ep7312_device_ready(struct mtd_info *mtd)
97{
98 return 1;
99}
100
101const char *part_probes[] = { "cmdlinepart", NULL };
102
103/*
104 * Main initialization routine
105 */
106static int __init ep7312_init(void)
107{
108 struct nand_chip *this;
109 const char *part_type = 0;
110 int mtd_parts_nb = 0;
111 struct mtd_partition *mtd_parts = 0;
112 void __iomem *ep7312_fio_base;
113
114 /* Allocate memory for MTD device structure and private data */
115 ep7312_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
116 if (!ep7312_mtd) {
117 printk("Unable to allocate EDB7312 NAND MTD device structure.\n");
118 return -ENOMEM;
119 }
120
121 /* map physical address */
122 ep7312_fio_base = ioremap(ep7312_fio_pbase, SZ_1K);
123 if (!ep7312_fio_base) {
124 printk("ioremap EDB7312 NAND flash failed\n");
125 kfree(ep7312_mtd);
126 return -EIO;
127 }
128
129 /* Get pointer to private data */
130 this = (struct nand_chip *)(&ep7312_mtd[1]);
131
132 /* Initialize structures */
133 memset(ep7312_mtd, 0, sizeof(struct mtd_info));
134 memset(this, 0, sizeof(struct nand_chip));
135
136 /* Link the private data with the MTD structure */
137 ep7312_mtd->priv = this;
138 ep7312_mtd->owner = THIS_MODULE;
139
140 /*
141 * Set GPIO Port B control register so that the pins are configured
142 * to be outputs for controlling the NAND flash.
143 */
144 clps_writeb(0xf0, ep7312_pxddr);
145
146 /* insert callbacks */
147 this->IO_ADDR_R = ep7312_fio_base;
148 this->IO_ADDR_W = ep7312_fio_base;
149 this->cmd_ctrl = ep7312_hwcontrol;
150 this->dev_ready = ep7312_device_ready;
151 /* 15 us command delay time */
152 this->chip_delay = 15;
153
154 /* Scan to find existence of the device */
155 if (nand_scan(ep7312_mtd, 1)) {
156 iounmap((void *)ep7312_fio_base);
157 kfree(ep7312_mtd);
158 return -ENXIO;
159 }
160 ep7312_mtd->name = "edb7312-nand";
161 mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0);
162 if (mtd_parts_nb > 0)
163 part_type = "command line";
164 else
165 mtd_parts_nb = 0;
166 if (mtd_parts_nb == 0) {
167 mtd_parts = partition_info;
168 mtd_parts_nb = NUM_PARTITIONS;
169 part_type = "static";
170 }
171
172 /* Register the partitions */
173 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
174 mtd_device_register(ep7312_mtd, mtd_parts, mtd_parts_nb);
175
176 /* Return happy */
177 return 0;
178}
179
180module_init(ep7312_init);
181
182/*
183 * Clean up routine
184 */
185static void __exit ep7312_cleanup(void)
186{
187 struct nand_chip *this = (struct nand_chip *)&ep7312_mtd[1];
188
189 /* Release resources, unregister device */
190 nand_release(ap7312_mtd);
191
192 /* Release io resource */
193 iounmap(this->IO_ADDR_R);
194
195 /* Free the MTD device structure */
196 kfree(ep7312_mtd);
197}
198
199module_exit(ep7312_cleanup);
200
201MODULE_LICENSE("GPL");
202MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
203MODULE_DESCRIPTION("MTD map driver for Cogent EDB7312 board");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 33d8aad8bba5..eedd8ee2c9ac 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -75,7 +75,6 @@ struct fsl_elbc_fcm_ctrl {
75 unsigned int use_mdr; /* Non zero if the MDR is to be set */ 75 unsigned int use_mdr; /* Non zero if the MDR is to be set */
76 unsigned int oob; /* Non zero if operating on OOB data */ 76 unsigned int oob; /* Non zero if operating on OOB data */
77 unsigned int counter; /* counter for the initializations */ 77 unsigned int counter; /* counter for the initializations */
78 char *oob_poi; /* Place to write ECC after read back */
79}; 78};
80 79
81/* These map to the positions used by the FCM hardware ECC generator */ 80/* These map to the positions used by the FCM hardware ECC generator */
@@ -244,6 +243,25 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
244 return -EIO; 243 return -EIO;
245 } 244 }
246 245
246 if (chip->ecc.mode != NAND_ECC_HW)
247 return 0;
248
249 if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
250 uint32_t lteccr = in_be32(&lbc->lteccr);
251 /*
252 * if command was a full page read and the ELBC
253 * has the LTECCR register, then bits 12-15 (ppc order) of
254 * LTECCR indicates which 512 byte sub-pages had fixed errors.
255 * bits 28-31 are uncorrectable errors, marked elsewhere.
256 * for small page nand only 1 bit is used.
257 * if the ELBC doesn't have the lteccr register it reads 0
258 */
259 if (lteccr & 0x000F000F)
260 out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
261 if (lteccr & 0x000F0000)
262 mtd->ecc_stats.corrected++;
263 }
264
247 return 0; 265 return 0;
248} 266}
249 267
@@ -435,7 +453,6 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
435 453
436 /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ 454 /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
437 case NAND_CMD_PAGEPROG: { 455 case NAND_CMD_PAGEPROG: {
438 int full_page;
439 dev_vdbg(priv->dev, 456 dev_vdbg(priv->dev,
440 "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG " 457 "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
441 "writing %d bytes.\n", elbc_fcm_ctrl->index); 458 "writing %d bytes.\n", elbc_fcm_ctrl->index);
@@ -445,34 +462,12 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
445 * write so the HW generates the ECC. 462 * write so the HW generates the ECC.
446 */ 463 */
447 if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 || 464 if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
448 elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) { 465 elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
449 out_be32(&lbc->fbcr, elbc_fcm_ctrl->index); 466 out_be32(&lbc->fbcr, elbc_fcm_ctrl->index);
450 full_page = 0; 467 else
451 } else {
452 out_be32(&lbc->fbcr, 0); 468 out_be32(&lbc->fbcr, 0);
453 full_page = 1;
454 }
455 469
456 fsl_elbc_run_command(mtd); 470 fsl_elbc_run_command(mtd);
457
458 /* Read back the page in order to fill in the ECC for the
459 * caller. Is this really needed?
460 */
461 if (full_page && elbc_fcm_ctrl->oob_poi) {
462 out_be32(&lbc->fbcr, 3);
463 set_addr(mtd, 6, page_addr, 1);
464
465 elbc_fcm_ctrl->read_bytes = mtd->writesize + 9;
466
467 fsl_elbc_do_read(chip, 1);
468 fsl_elbc_run_command(mtd);
469
470 memcpy_fromio(elbc_fcm_ctrl->oob_poi + 6,
471 &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], 3);
472 elbc_fcm_ctrl->index += 3;
473 }
474
475 elbc_fcm_ctrl->oob_poi = NULL;
476 return; 471 return;
477 } 472 }
478 473
@@ -752,13 +747,8 @@ static void fsl_elbc_write_page(struct mtd_info *mtd,
752 struct nand_chip *chip, 747 struct nand_chip *chip,
753 const uint8_t *buf) 748 const uint8_t *buf)
754{ 749{
755 struct fsl_elbc_mtd *priv = chip->priv;
756 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
757
758 fsl_elbc_write_buf(mtd, buf, mtd->writesize); 750 fsl_elbc_write_buf(mtd, buf, mtd->writesize);
759 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 751 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
760
761 elbc_fcm_ctrl->oob_poi = chip->oob_poi;
762} 752}
763 753
764static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) 754static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
@@ -791,8 +781,8 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
791 chip->bbt_md = &bbt_mirror_descr; 781 chip->bbt_md = &bbt_mirror_descr;
792 782
793 /* set up nand options */ 783 /* set up nand options */
794 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR | 784 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
795 NAND_USE_FLASH_BBT; 785 chip->bbt_options = NAND_BBT_USE_FLASH;
796 786
797 chip->controller = &elbc_fcm_ctrl->controller; 787 chip->controller = &elbc_fcm_ctrl->controller;
798 chip->priv = priv; 788 chip->priv = priv;
@@ -829,7 +819,6 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
829 819
830 elbc_fcm_ctrl->chips[priv->bank] = NULL; 820 elbc_fcm_ctrl->chips[priv->bank] = NULL;
831 kfree(priv); 821 kfree(priv);
832 kfree(elbc_fcm_ctrl);
833 return 0; 822 return 0;
834} 823}
835 824
@@ -842,13 +831,14 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
842 struct resource res; 831 struct resource res;
843 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl; 832 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
844 static const char *part_probe_types[] 833 static const char *part_probe_types[]
845 = { "cmdlinepart", "RedBoot", NULL }; 834 = { "cmdlinepart", "RedBoot", "ofpart", NULL };
846 struct mtd_partition *parts;
847 int ret; 835 int ret;
848 int bank; 836 int bank;
849 struct device *dev; 837 struct device *dev;
850 struct device_node *node = pdev->dev.of_node; 838 struct device_node *node = pdev->dev.of_node;
839 struct mtd_part_parser_data ppdata;
851 840
841 ppdata.of_node = pdev->dev.of_node;
852 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) 842 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
853 return -ENODEV; 843 return -ENODEV;
854 lbc = fsl_lbc_ctrl_dev->regs; 844 lbc = fsl_lbc_ctrl_dev->regs;
@@ -934,17 +924,8 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
934 924
935 /* First look for RedBoot table or partitions on the command 925 /* First look for RedBoot table or partitions on the command
936 * line, these take precedence over device tree information */ 926 * line, these take precedence over device tree information */
937 ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0); 927 mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata,
938 if (ret < 0) 928 NULL, 0);
939 goto err;
940
941 if (ret == 0) {
942 ret = of_mtd_parse_partitions(priv->dev, node, &parts);
943 if (ret < 0)
944 goto err;
945 }
946
947 mtd_device_register(&priv->mtd, parts, ret);
948 929
949 printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n", 930 printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
950 (unsigned long long)res.start, priv->bank); 931 (unsigned long long)res.start, priv->bank);
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 23752fd5bc59..b4f3cc9f32fb 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -158,7 +158,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
158{ 158{
159 int ret; 159 int ret;
160 struct device_node *flash_np; 160 struct device_node *flash_np;
161 static const char *part_types[] = { "cmdlinepart", NULL, }; 161 struct mtd_part_parser_data ppdata;
162 162
163 fun->chip.IO_ADDR_R = fun->io_base; 163 fun->chip.IO_ADDR_R = fun->io_base;
164 fun->chip.IO_ADDR_W = fun->io_base; 164 fun->chip.IO_ADDR_W = fun->io_base;
@@ -192,18 +192,12 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
192 if (ret) 192 if (ret)
193 goto err; 193 goto err;
194 194
195 ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0); 195 ppdata.of_node = flash_np;
196 196 ret = mtd_device_parse_register(&fun->mtd, NULL, &ppdata, NULL, 0);
197#ifdef CONFIG_MTD_OF_PARTS
198 if (ret == 0) {
199 ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts);
200 if (ret < 0)
201 goto err;
202 }
203#endif
204 ret = mtd_device_register(&fun->mtd, fun->parts, ret);
205err: 197err:
206 of_node_put(flash_np); 198 of_node_put(flash_np);
199 if (ret)
200 kfree(fun->mtd.name);
207 return ret; 201 return ret;
208} 202}
209 203
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index e9b275ac381c..e53b76064133 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -146,7 +146,7 @@ static struct mtd_partition partition_info_16KB_blk[] = {
146 { 146 {
147 .name = "Root File System", 147 .name = "Root File System",
148 .offset = 0x460000, 148 .offset = 0x460000,
149 .size = 0, 149 .size = MTDPART_SIZ_FULL,
150 }, 150 },
151}; 151};
152 152
@@ -173,13 +173,10 @@ static struct mtd_partition partition_info_128KB_blk[] = {
173 { 173 {
174 .name = "Root File System", 174 .name = "Root File System",
175 .offset = 0x800000, 175 .offset = 0x800000,
176 .size = 0, 176 .size = MTDPART_SIZ_FULL,
177 }, 177 },
178}; 178};
179 179
180#ifdef CONFIG_MTD_CMDLINE_PARTS
181const char *part_probes[] = { "cmdlinepart", NULL };
182#endif
183 180
184/** 181/**
185 * struct fsmc_nand_data - structure for FSMC NAND device state 182 * struct fsmc_nand_data - structure for FSMC NAND device state
@@ -187,8 +184,6 @@ const char *part_probes[] = { "cmdlinepart", NULL };
187 * @pid: Part ID on the AMBA PrimeCell format 184 * @pid: Part ID on the AMBA PrimeCell format
188 * @mtd: MTD info for a NAND flash. 185 * @mtd: MTD info for a NAND flash.
189 * @nand: Chip related info for a NAND flash. 186 * @nand: Chip related info for a NAND flash.
190 * @partitions: Partition info for a NAND Flash.
191 * @nr_partitions: Total number of partition of a NAND flash.
192 * 187 *
193 * @ecc_place: ECC placing locations in oobfree type format. 188 * @ecc_place: ECC placing locations in oobfree type format.
194 * @bank: Bank number for probed device. 189 * @bank: Bank number for probed device.
@@ -203,8 +198,6 @@ struct fsmc_nand_data {
203 u32 pid; 198 u32 pid;
204 struct mtd_info mtd; 199 struct mtd_info mtd;
205 struct nand_chip nand; 200 struct nand_chip nand;
206 struct mtd_partition *partitions;
207 unsigned int nr_partitions;
208 201
209 struct fsmc_eccplace *ecc_place; 202 struct fsmc_eccplace *ecc_place;
210 unsigned int bank; 203 unsigned int bank;
@@ -716,65 +709,17 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
716 * platform data, 709 * platform data,
717 * default partition information present in driver. 710 * default partition information present in driver.
718 */ 711 */
719#ifdef CONFIG_MTD_CMDLINE_PARTS
720 /* 712 /*
721 * Check if partition info passed via command line 713 * Check for partition info passed
722 */ 714 */
723 host->mtd.name = "nand"; 715 host->mtd.name = "nand";
724 host->nr_partitions = parse_mtd_partitions(&host->mtd, part_probes, 716 ret = mtd_device_parse_register(&host->mtd, NULL, 0,
725 &host->partitions, 0); 717 host->mtd.size <= 0x04000000 ?
726 if (host->nr_partitions <= 0) { 718 partition_info_16KB_blk :
727#endif 719 partition_info_128KB_blk,
728 /* 720 host->mtd.size <= 0x04000000 ?
729 * Check if partition info passed via command line 721 ARRAY_SIZE(partition_info_16KB_blk) :
730 */ 722 ARRAY_SIZE(partition_info_128KB_blk));
731 if (pdata->partitions) {
732 host->partitions = pdata->partitions;
733 host->nr_partitions = pdata->nr_partitions;
734 } else {
735 struct mtd_partition *partition;
736 int i;
737
738 /* Select the default partitions info */
739 switch (host->mtd.size) {
740 case 0x01000000:
741 case 0x02000000:
742 case 0x04000000:
743 host->partitions = partition_info_16KB_blk;
744 host->nr_partitions =
745 sizeof(partition_info_16KB_blk) /
746 sizeof(struct mtd_partition);
747 break;
748 case 0x08000000:
749 case 0x10000000:
750 case 0x20000000:
751 case 0x40000000:
752 host->partitions = partition_info_128KB_blk;
753 host->nr_partitions =
754 sizeof(partition_info_128KB_blk) /
755 sizeof(struct mtd_partition);
756 break;
757 default:
758 ret = -ENXIO;
759 pr_err("Unsupported NAND size\n");
760 goto err_probe;
761 }
762
763 partition = host->partitions;
764 for (i = 0; i < host->nr_partitions; i++, partition++) {
765 if (partition->size == 0) {
766 partition->size = host->mtd.size -
767 partition->offset;
768 break;
769 }
770 }
771 }
772#ifdef CONFIG_MTD_CMDLINE_PARTS
773 }
774#endif
775
776 ret = mtd_device_register(&host->mtd, host->partitions,
777 host->nr_partitions);
778 if (ret) 723 if (ret)
779 goto err_probe; 724 goto err_probe;
780 725
@@ -822,7 +767,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
822 platform_set_drvdata(pdev, NULL); 767 platform_set_drvdata(pdev, NULL);
823 768
824 if (host) { 769 if (host) {
825 mtd_device_unregister(&host->mtd); 770 nand_release(&host->mtd);
826 clk_disable(host->clk); 771 clk_disable(host->clk);
827 clk_put(host->clk); 772 clk_put(host->clk);
828 773
diff --git a/drivers/mtd/nand/gpmi-nand/Makefile b/drivers/mtd/nand/gpmi-nand/Makefile
new file mode 100644
index 000000000000..3a462487c35e
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
2gpmi_nand-objs += gpmi-nand.o
3gpmi_nand-objs += gpmi-lib.o
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
new file mode 100644
index 000000000000..4effb8c579db
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -0,0 +1,84 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 * Copyright 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21#ifndef __GPMI_NAND_BCH_REGS_H
22#define __GPMI_NAND_BCH_REGS_H
23
24#define HW_BCH_CTRL 0x00000000
25#define HW_BCH_CTRL_SET 0x00000004
26#define HW_BCH_CTRL_CLR 0x00000008
27#define HW_BCH_CTRL_TOG 0x0000000c
28
29#define BM_BCH_CTRL_COMPLETE_IRQ_EN (1 << 8)
30#define BM_BCH_CTRL_COMPLETE_IRQ (1 << 0)
31
32#define HW_BCH_STATUS0 0x00000010
33#define HW_BCH_MODE 0x00000020
34#define HW_BCH_ENCODEPTR 0x00000030
35#define HW_BCH_DATAPTR 0x00000040
36#define HW_BCH_METAPTR 0x00000050
37#define HW_BCH_LAYOUTSELECT 0x00000070
38
39#define HW_BCH_FLASH0LAYOUT0 0x00000080
40
41#define BP_BCH_FLASH0LAYOUT0_NBLOCKS 24
42#define BM_BCH_FLASH0LAYOUT0_NBLOCKS (0xff << BP_BCH_FLASH0LAYOUT0_NBLOCKS)
43#define BF_BCH_FLASH0LAYOUT0_NBLOCKS(v) \
44 (((v) << BP_BCH_FLASH0LAYOUT0_NBLOCKS) & BM_BCH_FLASH0LAYOUT0_NBLOCKS)
45
46#define BP_BCH_FLASH0LAYOUT0_META_SIZE 16
47#define BM_BCH_FLASH0LAYOUT0_META_SIZE (0xff << BP_BCH_FLASH0LAYOUT0_META_SIZE)
48#define BF_BCH_FLASH0LAYOUT0_META_SIZE(v) \
49 (((v) << BP_BCH_FLASH0LAYOUT0_META_SIZE)\
50 & BM_BCH_FLASH0LAYOUT0_META_SIZE)
51
52#define BP_BCH_FLASH0LAYOUT0_ECC0 12
53#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
54#define BF_BCH_FLASH0LAYOUT0_ECC0(v) \
55 (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0)
56
57#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
58#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
59 (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
60#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v) \
61 (((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\
62 & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)
63
64#define HW_BCH_FLASH0LAYOUT1 0x00000090
65
66#define BP_BCH_FLASH0LAYOUT1_PAGE_SIZE 16
67#define BM_BCH_FLASH0LAYOUT1_PAGE_SIZE \
68 (0xffff << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE)
69#define BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(v) \
70 (((v) << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) \
71 & BM_BCH_FLASH0LAYOUT1_PAGE_SIZE)
72
73#define BP_BCH_FLASH0LAYOUT1_ECCN 12
74#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
75#define BF_BCH_FLASH0LAYOUT1_ECCN(v) \
76 (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN)
77
78#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
79#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
80 (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
81#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v) \
82 (((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
83 & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)
84#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
new file mode 100644
index 000000000000..de4db7604a3f
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -0,0 +1,1057 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21#include <linux/mtd/gpmi-nand.h>
22#include <linux/delay.h>
23#include <linux/clk.h>
24#include <mach/mxs.h>
25
26#include "gpmi-nand.h"
27#include "gpmi-regs.h"
28#include "bch-regs.h"
29
30struct timing_threshod timing_default_threshold = {
31 .max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >>
32 BP_GPMI_TIMING0_DATA_SETUP),
33 .internal_data_setup_in_ns = 0,
34 .max_sample_delay_factor = (BM_GPMI_CTRL1_RDN_DELAY >>
35 BP_GPMI_CTRL1_RDN_DELAY),
36 .max_dll_clock_period_in_ns = 32,
37 .max_dll_delay_in_ns = 16,
38};
39
40/*
41 * Clear the bit and poll it cleared. This is usually called with
42 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
43 * (bit 30).
44 */
45static int clear_poll_bit(void __iomem *addr, u32 mask)
46{
47 int timeout = 0x400;
48
49 /* clear the bit */
50 __mxs_clrl(mask, addr);
51
52 /*
53 * SFTRST needs 3 GPMI clocks to settle, the reference manual
54 * recommends to wait 1us.
55 */
56 udelay(1);
57
58 /* poll the bit becoming clear */
59 while ((readl(addr) & mask) && --timeout)
60 /* nothing */;
61
62 return !timeout;
63}
64
65#define MODULE_CLKGATE (1 << 30)
66#define MODULE_SFTRST (1 << 31)
67/*
68 * The current mxs_reset_block() will do two things:
69 * [1] enable the module.
70 * [2] reset the module.
71 *
72 * In most of the cases, it's ok. But there is a hardware bug in the BCH block.
73 * If you try to soft reset the BCH block, it becomes unusable until
74 * the next hard reset. This case occurs in the NAND boot mode. When the board
75 * boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
76 * So If the driver tries to reset the BCH again, the BCH will not work anymore.
77 * You will see a DMA timeout in this case.
78 *
79 * To avoid this bug, just add a new parameter `just_enable` for
80 * the mxs_reset_block(), and rewrite it here.
81 */
82int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
83{
84 int ret;
85 int timeout = 0x400;
86
87 /* clear and poll SFTRST */
88 ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
89 if (unlikely(ret))
90 goto error;
91
92 /* clear CLKGATE */
93 __mxs_clrl(MODULE_CLKGATE, reset_addr);
94
95 if (!just_enable) {
96 /* set SFTRST to reset the block */
97 __mxs_setl(MODULE_SFTRST, reset_addr);
98 udelay(1);
99
100 /* poll CLKGATE becoming set */
101 while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
102 /* nothing */;
103 if (unlikely(!timeout))
104 goto error;
105 }
106
107 /* clear and poll SFTRST */
108 ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
109 if (unlikely(ret))
110 goto error;
111
112 /* clear and poll CLKGATE */
113 ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
114 if (unlikely(ret))
115 goto error;
116
117 return 0;
118
119error:
120 pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
121 return -ETIMEDOUT;
122}
123
124int gpmi_init(struct gpmi_nand_data *this)
125{
126 struct resources *r = &this->resources;
127 int ret;
128
129 ret = clk_enable(r->clock);
130 if (ret)
131 goto err_out;
132 ret = gpmi_reset_block(r->gpmi_regs, false);
133 if (ret)
134 goto err_out;
135
136 /* Choose NAND mode. */
137 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
138
139 /* Set the IRQ polarity. */
140 writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
141 r->gpmi_regs + HW_GPMI_CTRL1_SET);
142
143 /* Disable Write-Protection. */
144 writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
145
146 /* Select BCH ECC. */
147 writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
148
149 clk_disable(r->clock);
150 return 0;
151err_out:
152 return ret;
153}
154
/*
 * Debug aid: dump the whole GPMI register file and the computed BCH
 * geometry. It is called only when a bug occurs.
 */
void gpmi_dump_info(struct gpmi_nand_data *this)
{
        struct resources *r = &this->resources;
        struct bch_geometry *geo = &this->bch_geometry;
        u32 reg;
        int i;

        pr_err("Show GPMI registers :\n");
        /* Registers are laid out on a 0x10 stride up to HW_GPMI_DEBUG. */
        for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
                reg = readl(r->gpmi_regs + i * 0x10);
                pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
        }

        /* start to print out the BCH info */
        pr_err("BCH Geometry :\n");
        pr_err("GF length : %u\n", geo->gf_len);
        pr_err("ECC Strength : %u\n", geo->ecc_strength);
        pr_err("Page Size in Bytes : %u\n", geo->page_size);
        pr_err("Metadata Size in Bytes : %u\n", geo->metadata_size);
        pr_err("ECC Chunk Size in Bytes: %u\n", geo->ecc_chunk_size);
        pr_err("ECC Chunk Count : %u\n", geo->ecc_chunk_count);
        pr_err("Payload Size in Bytes : %u\n", geo->payload_size);
        pr_err("Auxiliary Size in Bytes: %u\n", geo->auxiliary_size);
        pr_err("Auxiliary Status Offset: %u\n", geo->auxiliary_status_offset);
        pr_err("Block Mark Byte Offset : %u\n", geo->block_mark_byte_offset);
        pr_err("Block Mark Bit Offset : %u\n", geo->block_mark_bit_offset);
}
183
184/* Configures the geometry for BCH. */
185int bch_set_geometry(struct gpmi_nand_data *this)
186{
187 struct resources *r = &this->resources;
188 struct bch_geometry *bch_geo = &this->bch_geometry;
189 unsigned int block_count;
190 unsigned int block_size;
191 unsigned int metadata_size;
192 unsigned int ecc_strength;
193 unsigned int page_size;
194 int ret;
195
196 if (common_nfc_set_geometry(this))
197 return !0;
198
199 block_count = bch_geo->ecc_chunk_count - 1;
200 block_size = bch_geo->ecc_chunk_size;
201 metadata_size = bch_geo->metadata_size;
202 ecc_strength = bch_geo->ecc_strength >> 1;
203 page_size = bch_geo->page_size;
204
205 ret = clk_enable(r->clock);
206 if (ret)
207 goto err_out;
208
209 ret = gpmi_reset_block(r->bch_regs, true);
210 if (ret)
211 goto err_out;
212
213 /* Configure layout 0. */
214 writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
215 | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
216 | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength)
217 | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size),
218 r->bch_regs + HW_BCH_FLASH0LAYOUT0);
219
220 writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
221 | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength)
222 | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size),
223 r->bch_regs + HW_BCH_FLASH0LAYOUT1);
224
225 /* Set *all* chip selects to use layout 0. */
226 writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
227
228 /* Enable interrupts. */
229 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
230 r->bch_regs + HW_BCH_CTRL_SET);
231
232 clk_disable(r->clock);
233 return 0;
234err_out:
235 return ret;
236}
237
/*
 * Converts a duration in nanoseconds to clock cycles, rounding up, and
 * never returning fewer than @min cycles.
 */
static unsigned int ns_to_cycles(unsigned int time,
                        unsigned int period, unsigned int min)
{
        unsigned int cycles;

        /* Ceiling division: how many whole periods cover @time. */
        cycles = (time + period - 1) / period;
        return cycles > min ? cycles : min;
}
247
/*
 * Apply timing to current hardware conditions.
 *
 * Translate the chip's abstract NAND timing (this->timing) into the
 * register-ready values in @hw: data setup/hold and address setup cycle
 * counts, whether the sample-delay DLL must run on half periods, and the
 * DLL sample delay factor. Uses either a simple or an "improved"
 * algorithm depending on whether tREA/tRLOH/tRHOH are known.
 *
 * Always returns 0.
 *
 * NOTE(review): assumes nfc->clock_frequency_in_hz is nonzero (it is set
 * from clk_get_rate() in gpmi_begin() before this is called) -- confirm.
 */
static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
                                        struct gpmi_nfc_hardware_timing *hw)
{
        struct gpmi_nand_platform_data *pdata = this->pdata;
        struct timing_threshod *nfc = &timing_default_threshold;
        struct nand_chip *nand = &this->nand;
        struct nand_timing target = this->timing;
        bool improved_timing_is_available;
        unsigned long clock_frequency_in_hz;
        unsigned int clock_period_in_ns;
        bool dll_use_half_periods;
        unsigned int dll_delay_shift;
        unsigned int max_sample_delay_in_ns;
        unsigned int address_setup_in_cycles;
        unsigned int data_setup_in_ns;
        unsigned int data_setup_in_cycles;
        unsigned int data_hold_in_cycles;
        int ideal_sample_delay_in_ns;
        unsigned int sample_delay_factor;
        int tEYE;
        unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns;
        unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns;

        /*
         * If there are multiple chips, we need to relax the timings to allow
         * for signal distortion due to higher capacitance.
         */
        if (nand->numchips > 2) {
                target.data_setup_in_ns    += 10;
                target.data_hold_in_ns     += 10;
                target.address_setup_in_ns += 10;
        } else if (nand->numchips > 1) {
                target.data_setup_in_ns    += 5;
                target.data_hold_in_ns     += 5;
                target.address_setup_in_ns += 5;
        }

        /* Check if improved timing information is available. */
        improved_timing_is_available =
                (target.tREA_in_ns  >= 0) &&
                (target.tRLOH_in_ns >= 0) &&
                (target.tRHOH_in_ns >= 0) ;

        /* Inspect the clock. */
        clock_frequency_in_hz = nfc->clock_frequency_in_hz;
        clock_period_in_ns    = 1000000000 / clock_frequency_in_hz;

        /*
         * The NFC quantizes setup and hold parameters in terms of clock cycles.
         * Here, we quantize the setup and hold timing parameters to the
         * next-highest clock period to make sure we apply at least the
         * specified times.
         *
         * For data setup and data hold, the hardware interprets a value of zero
         * as the largest possible delay. This is not what's intended by a zero
         * in the input parameter, so we impose a minimum of one cycle.
         */
        data_setup_in_cycles    = ns_to_cycles(target.data_setup_in_ns,
                                                        clock_period_in_ns, 1);
        data_hold_in_cycles     = ns_to_cycles(target.data_hold_in_ns,
                                                        clock_period_in_ns, 1);
        address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
                                                        clock_period_in_ns, 0);

        /*
         * The clock's period affects the sample delay in a number of ways:
         *
         * (1) The NFC HAL tells us the maximum clock period the sample delay
         *     DLL can tolerate. If the clock period is greater than half that
         *     maximum, we must configure the DLL to be driven by half periods.
         *
         * (2) We need to convert from an ideal sample delay, in ns, to a
         *     "sample delay factor," which the NFC uses. This factor depends on
         *     whether we're driving the DLL with full or half periods.
         *     Paraphrasing the reference manual:
         *
         *         AD = SDF x 0.125 x RP
         *
         * where:
         *
         *     AD   is the applied delay, in ns.
         *     SDF  is the sample delay factor, which is dimensionless.
         *     RP   is the reference period, in ns, which is a full clock period
         *          if the DLL is being driven by full periods, or half that if
         *          the DLL is being driven by half periods.
         *
         * Let's re-arrange this in a way that's more useful to us:
         *
         *                        8
         *         SDF  =  AD x ----
         *                       RP
         *
         * The reference period is either the clock period or half that, so this
         * is:
         *
         *                        8       AD x DDF
         *         SDF  =  AD x ----- = --------
         *                      f x P        P
         *
         * where:
         *
         *     f    is 1 or 1/2, depending on how we're driving the DLL.
         *     P    is the clock period.
         *     DDF  is the DLL Delay Factor, a dimensionless value that
         *          incorporates all the constants in the conversion.
         *
         * DDF will be either 8 or 16, both of which are powers of two. We can
         * reduce the cost of this conversion by using bit shifts instead of
         * multiplication or division. Thus:
         *
         *                 AD << DDS
         *         SDF  =  ---------
         *                     P
         *
         *     or
         *
         *         AD  =  (SDF >> DDS) x P
         *
         * where:
         *
         *     DDS  is the DLL Delay Shift, the logarithm to base 2 of the DDF.
         */
        if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
                dll_use_half_periods = true;
                dll_delay_shift      = 3 + 1;
        } else {
                dll_use_half_periods = false;
                dll_delay_shift      = 3;
        }

        /*
         * Compute the maximum sample delay the NFC allows, under current
         * conditions. If the clock is running too slowly, no sample delay is
         * possible.
         */
        if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
                max_sample_delay_in_ns = 0;
        else {
                /*
                 * Compute the delay implied by the largest sample delay factor
                 * the NFC allows.
                 */
                max_sample_delay_in_ns =
                        (nfc->max_sample_delay_factor * clock_period_in_ns) >>
                                                                dll_delay_shift;

                /*
                 * Check if the implied sample delay larger than the NFC
                 * actually allows.
                 */
                if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
                        max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
        }

        /*
         * Check if improved timing information is available. If not, we have to
         * use a less-sophisticated algorithm.
         */
        if (!improved_timing_is_available) {
                /*
                 * Fold the read setup time required by the NFC into the ideal
                 * sample delay.
                 */
                ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
                                                nfc->internal_data_setup_in_ns;

                /*
                 * The ideal sample delay may be greater than the maximum
                 * allowed by the NFC. If so, we can trade off sample delay time
                 * for more data setup time.
                 *
                 * In each iteration of the following loop, we add a cycle to
                 * the data setup time and subtract a corresponding amount from
                 * the sample delay until we've satisified the constraints or
                 * can't do any better.
                 */
                while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
                        (data_setup_in_cycles < nfc->max_data_setup_cycles)) {

                        data_setup_in_cycles++;
                        ideal_sample_delay_in_ns -= clock_period_in_ns;

                        if (ideal_sample_delay_in_ns < 0)
                                ideal_sample_delay_in_ns = 0;

                }

                /*
                 * Compute the sample delay factor that corresponds most closely
                 * to the ideal sample delay. If the result is too large for the
                 * NFC, use the maximum value.
                 *
                 * Notice that we use the ns_to_cycles function to compute the
                 * sample delay factor. We do this because the form of the
                 * computation is the same as that for calculating cycles.
                 */
                sample_delay_factor =
                        ns_to_cycles(
                                ideal_sample_delay_in_ns << dll_delay_shift,
                                                        clock_period_in_ns, 0);

                if (sample_delay_factor > nfc->max_sample_delay_factor)
                        sample_delay_factor = nfc->max_sample_delay_factor;

                /* Skip to the part where we return our results. */
                goto return_results;
        }

        /*
         * If control arrives here, we have more detailed timing information,
         * so we can use a better algorithm.
         */

        /*
         * Fold the read setup time required by the NFC into the maximum
         * propagation delay.
         */
        max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;

        /*
         * Earlier, we computed the number of clock cycles required to satisfy
         * the data setup time. Now, we need to know the actual nanoseconds.
         */
        data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;

        /*
         * Compute tEYE, the width of the data eye when reading from the NAND
         * Flash. The eye width is fundamentally determined by the data setup
         * time, perturbed by propagation delays and some characteristics of the
         * NAND Flash device.
         *
         * start of the eye = max_prop_delay + tREA
         * end of the eye   = min_prop_delay + tRHOH + data_setup
         */
        tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
                                                        (int)data_setup_in_ns;

        tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;

        /*
         * The eye must be open. If it's not, we can try to open it by
         * increasing its main forcer, the data setup time.
         *
         * In each iteration of the following loop, we increase the data setup
         * time by a single clock cycle. We do this until either the eye is
         * open or we run into NFC limits.
         */
        while ((tEYE <= 0) &&
                        (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
                /* Give a cycle to data setup. */
                data_setup_in_cycles++;
                /* Synchronize the data setup time with the cycles. */
                data_setup_in_ns += clock_period_in_ns;
                /* Adjust tEYE accordingly. */
                tEYE += clock_period_in_ns;
        }

        /*
         * When control arrives here, the eye is open. The ideal time to sample
         * the data is in the center of the eye:
         *
         *     end of the eye + start of the eye
         *     --------------------------------- - data_setup
         *                    2
         *
         * After some algebra, this simplifies to the code immediately below.
         */
        ideal_sample_delay_in_ns =
                ((int)max_prop_delay_in_ns +
                        (int)target.tREA_in_ns +
                                (int)min_prop_delay_in_ns +
                                        (int)target.tRHOH_in_ns -
                                                (int)data_setup_in_ns) >> 1;

        /*
         * The following figure illustrates some aspects of a NAND Flash read:
         *
         *
         *           __                   _____________________________________
         * RDN         \_________________/
         *
         *                                         <---- tEYE ----->
         *                                        /-----------------\
         * Read Data ----------------------------<                   >---------
         *                                        \-----------------/
         *             ^                 ^                 ^   ^
         *             |                 |                 |   |
         *             |<--Data Setup -->|<--Delay Time -->|   |
         *             |                 |                 |   |
         *             |                 |                     |
         *             |                 |<--   Quantized Delay Time   -->|
         *             |                 |                                |
         *
         *
         * We have some issues we must now address:
         *
         * (1) The *ideal* sample delay time must not be negative. If it is, we
         *     jam it to zero.
         *
         * (2) The *ideal* sample delay time must not be greater than that
         *     allowed by the NFC. If it is, we can increase the data setup
         *     time, which will reduce the delay between the end of the data
         *     setup and the center of the eye. It will also make the eye
         *     larger, which might help with the next issue...
         *
         * (3) The *quantized* sample delay time must not fall either before the
         *     eye opens or after it closes (the latter is the problem
         *     illustrated in the above figure).
         */

        /* Jam a negative ideal sample delay to zero. */
        if (ideal_sample_delay_in_ns < 0)
                ideal_sample_delay_in_ns = 0;

        /*
         * Extend the data setup as needed to reduce the ideal sample delay
         * below the maximum permitted by the NFC.
         */
        while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
                        (data_setup_in_cycles < nfc->max_data_setup_cycles)) {

                /* Give a cycle to data setup. */
                data_setup_in_cycles++;
                /* Synchronize the data setup time with the cycles. */
                data_setup_in_ns += clock_period_in_ns;
                /* Adjust tEYE accordingly. */
                tEYE += clock_period_in_ns;

                /*
                 * Decrease the ideal sample delay by one half cycle, to keep it
                 * in the middle of the eye.
                 */
                ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);

                /* Jam a negative ideal sample delay to zero. */
                if (ideal_sample_delay_in_ns < 0)
                        ideal_sample_delay_in_ns = 0;
        }

        /*
         * Compute the sample delay factor that corresponds to the ideal sample
         * delay. If the result is too large, then use the maximum allowed
         * value.
         *
         * Notice that we use the ns_to_cycles function to compute the sample
         * delay factor. We do this because the form of the computation is the
         * same as that for calculating cycles.
         */
        sample_delay_factor =
                ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
                                                        clock_period_in_ns, 0);

        if (sample_delay_factor > nfc->max_sample_delay_factor)
                sample_delay_factor = nfc->max_sample_delay_factor;

        /*
         * These macros conveniently encapsulate a computation we'll use to
         * continuously evaluate whether or not the data sample delay is inside
         * the eye.
         */
        #define IDEAL_DELAY ((int) ideal_sample_delay_in_ns)

        #define QUANTIZED_DELAY \
                ((int) ((sample_delay_factor * clock_period_in_ns) >> \
                                                        dll_delay_shift))

        #define DELAY_ERROR (abs(QUANTIZED_DELAY - IDEAL_DELAY))

        #define SAMPLE_IS_NOT_WITHIN_THE_EYE (DELAY_ERROR > (tEYE >> 1))

        /*
         * While the quantized sample time falls outside the eye, reduce the
         * sample delay or extend the data setup to move the sampling point back
         * toward the eye. Do not allow the number of data setup cycles to
         * exceed the maximum allowed by the NFC.
         */
        while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
                        (data_setup_in_cycles < nfc->max_data_setup_cycles)) {
                /*
                 * If control arrives here, the quantized sample delay falls
                 * outside the eye. Check if it's before the eye opens, or after
                 * the eye closes.
                 */
                if (QUANTIZED_DELAY > IDEAL_DELAY) {
                        /*
                         * If control arrives here, the quantized sample delay
                         * falls after the eye closes. Decrease the quantized
                         * delay time and then go back to re-evaluate.
                         */
                        if (sample_delay_factor != 0)
                                sample_delay_factor--;
                        continue;
                }

                /*
                 * If control arrives here, the quantized sample delay falls
                 * before the eye opens. Shift the sample point by increasing
                 * data setup time. This will also make the eye larger.
                 */

                /* Give a cycle to data setup. */
                data_setup_in_cycles++;
                /* Synchronize the data setup time with the cycles. */
                data_setup_in_ns += clock_period_in_ns;
                /* Adjust tEYE accordingly. */
                tEYE += clock_period_in_ns;

                /*
                 * Decrease the ideal sample delay by one half cycle, to keep it
                 * in the middle of the eye.
                 */
                ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);

                /* ...and one less period for the delay time. */
                ideal_sample_delay_in_ns -= clock_period_in_ns;

                /* Jam a negative ideal sample delay to zero. */
                if (ideal_sample_delay_in_ns < 0)
                        ideal_sample_delay_in_ns = 0;

                /*
                 * We have a new ideal sample delay, so re-compute the quantized
                 * delay.
                 */
                sample_delay_factor =
                        ns_to_cycles(
                                ideal_sample_delay_in_ns << dll_delay_shift,
                                                        clock_period_in_ns, 0);

                if (sample_delay_factor > nfc->max_sample_delay_factor)
                        sample_delay_factor = nfc->max_sample_delay_factor;
        }

        /* Control arrives here when we're ready to return our results. */
return_results:
        hw->data_setup_in_cycles    = data_setup_in_cycles;
        hw->data_hold_in_cycles     = data_hold_in_cycles;
        hw->address_setup_in_cycles = address_setup_in_cycles;
        hw->use_half_periods        = dll_use_half_periods;
        hw->sample_delay_factor     = sample_delay_factor;

        /* Return success. */
        return 0;
}
693
/*
 * Begin the I/O: enable the clock and program the hardware timing.
 *
 * The clock deliberately stays enabled on every exit path except the
 * clk_enable() failure -- the matching clk_disable() is in gpmi_end().
 *
 * NOTE(review): the return value of gpmi_nfc_compute_hardware_timing()
 * is ignored; as written it always returns 0.
 */
void gpmi_begin(struct gpmi_nand_data *this)
{
        struct resources *r = &this->resources;
        struct timing_threshod *nfc = &timing_default_threshold;
        unsigned char *gpmi_regs = r->gpmi_regs;
        unsigned int clock_period_in_ns;
        uint32_t reg;
        unsigned int dll_wait_time_in_us;
        struct gpmi_nfc_hardware_timing hw;
        int ret;

        /* Enable the clock. */
        ret = clk_enable(r->clock);
        if (ret) {
                pr_err("We failed in enable the clk\n");
                goto err_out;
        }

        /* set ready/busy timeout */
        writel(0x500 << BP_GPMI_TIMING1_BUSY_TIMEOUT,
                gpmi_regs + HW_GPMI_TIMING1);

        /* Get the timing information we need. */
        nfc->clock_frequency_in_hz = clk_get_rate(r->clock);
        clock_period_in_ns = 1000000000 / nfc->clock_frequency_in_hz;

        gpmi_nfc_compute_hardware_timing(this, &hw);

        /* Set up all the simple timing parameters. */
        reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
                BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles)         |
                BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles)       ;

        writel(reg, gpmi_regs + HW_GPMI_TIMING0);

        /*
         * DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD.
         */
        writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);

        /* Clear out the DLL control fields. */
        writel(BM_GPMI_CTRL1_RDN_DELAY, gpmi_regs + HW_GPMI_CTRL1_CLR);
        writel(BM_GPMI_CTRL1_HALF_PERIOD, gpmi_regs + HW_GPMI_CTRL1_CLR);

        /*
         * If no sample delay is called for, return immediately (the clock
         * stays on for the I/O; gpmi_end() will release it).
         */
        if (!hw.sample_delay_factor)
                return;

        /* Configure the HALF_PERIOD flag. */
        if (hw.use_half_periods)
                writel(BM_GPMI_CTRL1_HALF_PERIOD,
                        gpmi_regs + HW_GPMI_CTRL1_SET);

        /* Set the delay factor. */
        writel(BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor),
                gpmi_regs + HW_GPMI_CTRL1_SET);

        /* Enable the DLL. */
        writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);

        /*
         * After we enable the GPMI DLL, we have to wait 64 clock cycles before
         * we can use the GPMI.
         *
         * Calculate the amount of time we need to wait, in microseconds.
         */
        dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;

        if (!dll_wait_time_in_us)
                dll_wait_time_in_us = 1;

        /* Wait for the DLL to settle. */
        udelay(dll_wait_time_in_us);

err_out:
        return;
}
772
773void gpmi_end(struct gpmi_nand_data *this)
774{
775 struct resources *r = &this->resources;
776 clk_disable(r->clock);
777}
778
779/* Clears a BCH interrupt. */
780void gpmi_clear_bch(struct gpmi_nand_data *this)
781{
782 struct resources *r = &this->resources;
783 writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
784}
785
786/* Returns the Ready/Busy status of the given chip. */
787int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
788{
789 struct resources *r = &this->resources;
790 uint32_t mask = 0;
791 uint32_t reg = 0;
792
793 if (GPMI_IS_MX23(this)) {
794 mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
795 reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
796 } else if (GPMI_IS_MX28(this)) {
797 mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
798 reg = readl(r->gpmi_regs + HW_GPMI_STAT);
799 } else
800 pr_err("unknow arch.\n");
801 return reg & mask;
802}
803
/*
 * Record the type of the DMA operation about to be started, remembering
 * the previous type in last_dma_type (read by the completion paths).
 */
static inline void set_dma_type(struct gpmi_nand_data *this,
                                        enum dma_ops_type type)
{
        this->last_dma_type = this->dma_type;
        this->dma_type = type;
}
810
/*
 * Send the command/address string in this->cmd_buffer (length
 * this->command_length) to the current chip via DMA.
 *
 * Returns the result of start_dma_without_bch_irq(), or -1 if a DMA
 * descriptor could not be prepared.
 */
int gpmi_send_command(struct gpmi_nand_data *this)
{
        struct dma_chan *channel = get_dma_chan(this);
        struct dma_async_tx_descriptor *desc;
        struct scatterlist *sgl;
        int chip = this->current_chip;
        u32 pio[3];

        /*
         * [1] send out the PIO words
         *
         * NOTE(review): the pio[] array is cast to a scatterlist pointer and
         * submitted with DMA_NONE; presumably the mxs-dma driver interprets
         * such descriptors as PIO register writes -- confirm against the DMA
         * engine driver.
         */
        pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
                | BM_GPMI_CTRL0_WORD_LENGTH
                | BF_GPMI_CTRL0_CS(chip, this)
                | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
                | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
                | BM_GPMI_CTRL0_ADDRESS_INCREMENT
                | BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
        pio[1] = pio[2] = 0;
        desc = channel->device->device_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
                                        ARRAY_SIZE(pio), DMA_NONE, 0);
        if (!desc) {
                pr_err("step 1 error\n");
                return -1;
        }

        /* [2] send out the COMMAND + ADDRESS string stored in @buffer */
        sgl = &this->cmd_sgl;

        sg_init_one(sgl, this->cmd_buffer, this->command_length);
        dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
        desc = channel->device->device_prep_slave_sg(channel,
                                        sgl, 1, DMA_TO_DEVICE, 1);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
        }

        /* [3] submit the DMA */
        set_dma_type(this, DMA_FOR_COMMAND);
        return start_dma_without_bch_irq(this, desc);
}
852
853int gpmi_send_data(struct gpmi_nand_data *this)
854{
855 struct dma_async_tx_descriptor *desc;
856 struct dma_chan *channel = get_dma_chan(this);
857 int chip = this->current_chip;
858 uint32_t command_mode;
859 uint32_t address;
860 u32 pio[2];
861
862 /* [1] PIO */
863 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
864 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
865
866 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
867 | BM_GPMI_CTRL0_WORD_LENGTH
868 | BF_GPMI_CTRL0_CS(chip, this)
869 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
870 | BF_GPMI_CTRL0_ADDRESS(address)
871 | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
872 pio[1] = 0;
873 desc = channel->device->device_prep_slave_sg(channel,
874 (struct scatterlist *)pio,
875 ARRAY_SIZE(pio), DMA_NONE, 0);
876 if (!desc) {
877 pr_err("step 1 error\n");
878 return -1;
879 }
880
881 /* [2] send DMA request */
882 prepare_data_dma(this, DMA_TO_DEVICE);
883 desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
884 1, DMA_TO_DEVICE, 1);
885 if (!desc) {
886 pr_err("step 2 error\n");
887 return -1;
888 }
889 /* [3] submit the DMA */
890 set_dma_type(this, DMA_FOR_WRITE_DATA);
891 return start_dma_without_bch_irq(this, desc);
892}
893
/*
 * Read this->upper_len bytes of raw data (no ECC) from the current chip
 * into this->data_sgl via DMA.
 *
 * Returns the result of start_dma_without_bch_irq(), or -1 if a DMA
 * descriptor could not be prepared.
 */
int gpmi_read_data(struct gpmi_nand_data *this)
{
        struct dma_async_tx_descriptor *desc;
        struct dma_chan *channel = get_dma_chan(this);
        int chip = this->current_chip;
        u32 pio[2];

        /* [1] : send PIO (program CTRL0 for a raw read) */
        pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
                | BM_GPMI_CTRL0_WORD_LENGTH
                | BF_GPMI_CTRL0_CS(chip, this)
                | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
                | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
                | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
        pio[1] = 0;
        desc = channel->device->device_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
                                        ARRAY_SIZE(pio), DMA_NONE, 0);
        if (!desc) {
                pr_err("step 1 error\n");
                return -1;
        }

        /* [2] : send DMA request */
        prepare_data_dma(this, DMA_FROM_DEVICE);
        desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
                                        1, DMA_FROM_DEVICE, 1);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
        }

        /* [3] : submit the DMA */
        set_dma_type(this, DMA_FOR_READ_DATA);
        return start_dma_without_bch_irq(this, desc);
}
930
/*
 * Write one page through the BCH encoder: @payload and @auxiliary are
 * the DMA addresses of the page data and metadata buffers.
 *
 * Returns the result of start_dma_with_bch_irq(), or -1 if the DMA
 * descriptor could not be prepared.
 */
int gpmi_send_page(struct gpmi_nand_data *this,
                        dma_addr_t payload, dma_addr_t auxiliary)
{
        struct bch_geometry *geo = &this->bch_geometry;
        uint32_t command_mode;
        uint32_t address;
        uint32_t ecc_command;
        uint32_t buffer_mask;
        struct dma_async_tx_descriptor *desc;
        struct dma_chan *channel = get_dma_chan(this);
        int chip = this->current_chip;
        u32 pio[6];

        /*
         * A DMA descriptor that does an ECC page *write* (the original
         * comment said "read", but the code below selects the WRITE command
         * mode and the BCH_ENCODE command).
         */
        command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
        address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
        ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
        buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
                        BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

        pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
                | BM_GPMI_CTRL0_WORD_LENGTH
                | BF_GPMI_CTRL0_CS(chip, this)
                | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
                | BF_GPMI_CTRL0_ADDRESS(address)
                | BF_GPMI_CTRL0_XFER_COUNT(0);
        pio[1] = 0;
        pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
                | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
                | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
        pio[3] = geo->page_size;
        pio[4] = payload;
        pio[5] = auxiliary;

        desc = channel->device->device_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
                                        ARRAY_SIZE(pio), DMA_NONE, 0);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
        }
        set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
        return start_dma_with_bch_irq(this, desc);
}
975
/*
 * Read one page through the BCH decoder, as a chain of three DMA
 * descriptors: wait-for-ready, the ECC read itself, and a final
 * wait-for-ready that also disables the BCH block.
 *
 * @payload / @auxiliary: DMA addresses of the data and metadata buffers.
 *
 * Returns the result of start_dma_with_bch_irq(), or -1 if any
 * descriptor could not be prepared.
 */
int gpmi_read_page(struct gpmi_nand_data *this,
                        dma_addr_t payload, dma_addr_t auxiliary)
{
        struct bch_geometry *geo = &this->bch_geometry;
        uint32_t command_mode;
        uint32_t address;
        uint32_t ecc_command;
        uint32_t buffer_mask;
        struct dma_async_tx_descriptor *desc;
        struct dma_chan *channel = get_dma_chan(this);
        int chip = this->current_chip;
        u32 pio[6];

        /* [1] Wait for the chip to report ready. */
        command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
        address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

        pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
                | BM_GPMI_CTRL0_WORD_LENGTH
                | BF_GPMI_CTRL0_CS(chip, this)
                | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
                | BF_GPMI_CTRL0_ADDRESS(address)
                | BF_GPMI_CTRL0_XFER_COUNT(0);
        pio[1] = 0;
        /* Only the first two PIO words are used here, hence the count of 2. */
        desc = channel->device->device_prep_slave_sg(channel,
                                (struct scatterlist *)pio, 2, DMA_NONE, 0);
        if (!desc) {
                pr_err("step 1 error\n");
                return -1;
        }

        /* [2] Enable the BCH block and read. */
        command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
        address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
        ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
        buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
                        | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

        pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
                | BM_GPMI_CTRL0_WORD_LENGTH
                | BF_GPMI_CTRL0_CS(chip, this)
                | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
                | BF_GPMI_CTRL0_ADDRESS(address)
                | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);

        pio[1] = 0;
        pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
                | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
                | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
        pio[3] = geo->page_size;
        pio[4] = payload;
        pio[5] = auxiliary;
        desc = channel->device->device_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
                                        ARRAY_SIZE(pio), DMA_NONE, 1);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
        }

        /* [3] Disable the BCH block */
        command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
        address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

        pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
                | BM_GPMI_CTRL0_WORD_LENGTH
                | BF_GPMI_CTRL0_CS(chip, this)
                | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
                | BF_GPMI_CTRL0_ADDRESS(address)
                | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
        pio[1] = 0;
        /* Again only two PIO words are submitted. */
        desc = channel->device->device_prep_slave_sg(channel,
                                (struct scatterlist *)pio, 2, DMA_NONE, 1);
        if (!desc) {
                pr_err("step 3 error\n");
                return -1;
        }

        /* [4] submit the DMA */
        set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
        return start_dma_with_bch_irq(this, desc);
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
new file mode 100644
index 000000000000..071b63420f0e
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -0,0 +1,1619 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21#include <linux/clk.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/mtd/gpmi-nand.h>
25#include <linux/mtd/partitions.h>
26
27#include "gpmi-nand.h"
28
/*
 * Our own bad-block-table descriptor: a block is considered good when the
 * first OOB byte reads back as 0xff.
 */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
	.options	= 0,
	.offs		= 0,
	.len		= 1,
	.pattern	= scan_ff_pattern
};
37
/*
 * We will use all the (page + OOB): the BCH engine covers the whole page,
 * so the layout advertises no ECC bytes and no free OOB space to MTD.
 */
static struct nand_ecclayout gpmi_hw_ecclayout = {
	.eccbytes = 0,
	.eccpos = { 0, },
	.oobfree = { {.offset = 0, .length = 0} }
};
44
45static irqreturn_t bch_irq(int irq, void *cookie)
46{
47 struct gpmi_nand_data *this = cookie;
48
49 gpmi_clear_bch(this);
50 complete(&this->bch_done);
51 return IRQ_HANDLED;
52}
53
54/*
55 * Calculate the ECC strength by hand:
56 * E : The ECC strength.
57 * G : the length of Galois Field.
58 * N : The chunk count of per page.
59 * O : the oobsize of the NAND chip.
60 * M : the metasize of per page.
61 *
62 * The formula is :
63 * E * G * N
64 * ------------ <= (O - M)
65 * 8
66 *
67 * So, we get E by:
68 * (O - M) * 8
69 * E <= -------------
70 * G * N
71 */
72static inline int get_ecc_strength(struct gpmi_nand_data *this)
73{
74 struct bch_geometry *geo = &this->bch_geometry;
75 struct mtd_info *mtd = &this->mtd;
76 int ecc_strength;
77
78 ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
79 / (geo->gf_len * geo->ecc_chunk_count);
80
81 /* We need the minor even number. */
82 return round_down(ecc_strength, 2);
83}
84
/*
 * Derive the BCH geometry (chunk size/count, ECC strength, buffer sizes
 * and, when block mark swapping is used, the block mark position within
 * the ECC-based view of the page) from the chip's writesize and oobsize.
 *
 * Returns 0 on success, -EINVAL when no usable ECC strength results.
 */
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = &this->mtd;
	unsigned int metadata_size;
	unsigned int status_size;
	unsigned int block_mark_bit_offset;

	/*
	 * The size of the metadata can be changed, though we set it to 10
	 * bytes now. But it can't be too large, because we have to save
	 * enough space for BCH.
	 */
	geo->metadata_size = 10;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default chunk size. There is no oobsize greater than 512. */
	geo->ecc_chunk_size = 512;
	while (geo->ecc_chunk_size < mtd->oobsize)
		geo->ecc_chunk_size *= 2; /* keep C >= O */

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/* We use the same ECC strength for all chunks. */
	geo->ecc_strength = get_ecc_strength(this);
	if (!geo->ecc_strength) {
		pr_err("We get a wrong ECC strength.\n");
		return -EINVAL;
	}

	/* The ECC view covers the whole physical page: data plus OOB. */
	geo->page_size = mtd->writesize + mtd->oobsize;
	geo->payload_size = mtd->writesize;

	/*
	 * The auxiliary buffer contains the metadata and the ECC status. The
	 * metadata is padded to the nearest 32-bit boundary. The ECC status
	 * contains one byte for every ECC chunk, and is also padded to the
	 * nearest 32-bit boundary.
	 */
	metadata_size = ALIGN(geo->metadata_size, 4);
	status_size = ALIGN(geo->ecc_chunk_count, 4);

	geo->auxiliary_size = metadata_size + status_size;
	geo->auxiliary_status_offset = metadata_size;

	/* Without block mark swapping there is nothing more to compute. */
	if (!this->swap_block_mark)
		return 0;

	/*
	 * We need to compute the byte and bit offsets of
	 * the physical block mark within the ECC-based view of the page.
	 *
	 * NAND chip with 2K page shows below:
	 *                                             (Block Mark)
	 *                                                   |      |
	 *                                                   |  D   |
	 *                                                   |<---->|
	 *                                                   V      V
	 *    +---+----------+-+----------+-+----------+-+----------+-+
	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
	 *    +---+----------+-+----------+-+----------+-+----------+-+
	 *
	 * The position of block mark moves forward in the ECC-based view
	 * of page, and the delta is:
	 *
	 *                   E * G * (N - 1)
	 *             D = (---------------- + M)
	 *                          8
	 *
	 * With the formula to compute the ECC strength, and the condition
	 *       : C >= O         (C is the ecc chunk size)
	 *
	 * It's easy to deduce to the following result:
	 *
	 *         E * G       (O - M)      C - M         C - M
	 *      ----------- <= ------- <=  --------  <  ---------
	 *           8            N           N          (N - 1)
	 *
	 *  So, we get:
	 *
	 *                   E * G * (N - 1)
	 *             D = (---------------- + M) < C
	 *                          8
	 *
	 *  The above inequality means the position of block mark
	 *  within the ECC-based view of the page is still in the data chunk,
	 *  and it's NOT in the ECC bits of the chunk.
	 *
	 *  Use the following to compute the bit position of the
	 *  physical block mark within the ECC-based view of the page:
	 *          (page_size - D) * 8
	 *
	 *  --Huang Shijie
	 */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;
	return 0;
}
189
190struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
191{
192 int chipnr = this->current_chip;
193
194 return this->dma_chans[chipnr];
195}
196
/*
 * Can we use the upper layer's buffer directly for DMA?
 *
 * Try to dma_map_sg() the caller-supplied buffer (this->upper_buf).  On
 * failure fall back to the driver's own bounce buffer: for writes the
 * caller's data is copied into it first; for reads, dma_irq_callback()
 * copies the data back afterwards.  direct_dma_map_ok records which path
 * was taken.
 */
void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
{
	struct scatterlist *sgl = &this->data_sgl;
	int ret;

	/* Assume the direct mapping will succeed until proven otherwise. */
	this->direct_dma_map_ok = true;

	/* first try to map the upper buffer directly */
	sg_init_one(sgl, this->upper_buf, this->upper_len);
	ret = dma_map_sg(this->dev, sgl, 1, dr);
	if (ret == 0) {
		/* We have to use our own DMA buffer. */
		sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE);

		if (dr == DMA_TO_DEVICE)
			memcpy(this->data_buffer_dma, this->upper_buf,
				this->upper_len);

		/*
		 * NOTE(review): a failure of this second mapping is only
		 * logged; callers proceed regardless -- confirm this
		 * best-effort behavior is intended.
		 */
		ret = dma_map_sg(this->dev, sgl, 1, dr);
		if (ret == 0)
			pr_err("map failed.\n");

		this->direct_dma_map_ok = false;
	}
}
223
224/* This will be called after the DMA operation is finished. */
225static void dma_irq_callback(void *param)
226{
227 struct gpmi_nand_data *this = param;
228 struct completion *dma_c = &this->dma_done;
229
230 complete(dma_c);
231
232 switch (this->dma_type) {
233 case DMA_FOR_COMMAND:
234 dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
235 break;
236
237 case DMA_FOR_READ_DATA:
238 dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
239 if (this->direct_dma_map_ok == false)
240 memcpy(this->upper_buf, this->data_buffer_dma,
241 this->upper_len);
242 break;
243
244 case DMA_FOR_WRITE_DATA:
245 dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
246 break;
247
248 case DMA_FOR_READ_ECC_PAGE:
249 case DMA_FOR_WRITE_ECC_PAGE:
250 /* We have to wait the BCH interrupt to finish. */
251 break;
252
253 default:
254 pr_err("in wrong DMA operation.\n");
255 }
256}
257
/*
 * Submit a prepared DMA descriptor and wait (up to one second) for the
 * DMA completion signalled by dma_irq_callback().
 *
 * Returns 0 on success, -ETIMEDOUT if the DMA interrupt never arrives.
 */
int start_dma_without_bch_irq(struct gpmi_nand_data *this,
			struct dma_async_tx_descriptor *desc)
{
	struct completion *dma_c = &this->dma_done;
	int err;

	/* Arm the completion before submitting, to avoid missing the wake. */
	init_completion(dma_c);

	desc->callback		= dma_irq_callback;
	desc->callback_param	= this;
	dmaengine_submit(desc);

	/* Wait for the interrupt from the DMA block. */
	err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
	if (!err) {
		pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type);
		gpmi_dump_info(this);
		return -ETIMEDOUT;
	}
	return 0;
}
279
280/*
281 * This function is used in BCH reading or BCH writing pages.
282 * It will wait for the BCH interrupt as long as ONE second.
283 * Actually, we must wait for two interrupts :
284 * [1] firstly the DMA interrupt and
285 * [2] secondly the BCH interrupt.
286 */
287int start_dma_with_bch_irq(struct gpmi_nand_data *this,
288 struct dma_async_tx_descriptor *desc)
289{
290 struct completion *bch_c = &this->bch_done;
291 int err;
292
293 /* Prepare to receive an interrupt from the BCH block. */
294 init_completion(bch_c);
295
296 /* start the DMA */
297 start_dma_without_bch_irq(this, desc);
298
299 /* Wait for the interrupt from the BCH block. */
300 err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
301 if (!err) {
302 pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type);
303 gpmi_dump_info(this);
304 return -ETIMEDOUT;
305 }
306 return 0;
307}
308
309static int __devinit
310acquire_register_block(struct gpmi_nand_data *this, const char *res_name)
311{
312 struct platform_device *pdev = this->pdev;
313 struct resources *res = &this->resources;
314 struct resource *r;
315 void *p;
316
317 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
318 if (!r) {
319 pr_err("Can't get resource for %s\n", res_name);
320 return -ENXIO;
321 }
322
323 p = ioremap(r->start, resource_size(r));
324 if (!p) {
325 pr_err("Can't remap %s\n", res_name);
326 return -ENOMEM;
327 }
328
329 if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
330 res->gpmi_regs = p;
331 else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
332 res->bch_regs = p;
333 else
334 pr_err("unknown resource name : %s\n", res_name);
335
336 return 0;
337}
338
339static void release_register_block(struct gpmi_nand_data *this)
340{
341 struct resources *res = &this->resources;
342 if (res->gpmi_regs)
343 iounmap(res->gpmi_regs);
344 if (res->bch_regs)
345 iounmap(res->bch_regs);
346 res->gpmi_regs = NULL;
347 res->bch_regs = NULL;
348}
349
/*
 * Request the BCH completion interrupt and record its number(s).
 *
 * NOTE(review): only the first IRQ of the resource (r->start) is actually
 * requested here, yet both r->start and r->end are recorded and
 * release_bch_irq() frees the whole [low, high] range.  If the platform
 * resource ever spans more than one IRQ, un-requested IRQs would be
 * freed -- confirm the resource is always a single IRQ.
 */
static int __devinit
acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
	struct platform_device *pdev = this->pdev;
	struct resources *res = &this->resources;
	const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
	struct resource *r;
	int err;

	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
	if (!r) {
		pr_err("Can't get resource for %s\n", res_name);
		return -ENXIO;
	}

	err = request_irq(r->start, irq_h, 0, res_name, this);
	if (err) {
		pr_err("Can't own %s\n", res_name);
		return err;
	}

	res->bch_low_interrupt = r->start;
	res->bch_high_interrupt = r->end;
	return 0;
}
375
376static void release_bch_irq(struct gpmi_nand_data *this)
377{
378 struct resources *res = &this->resources;
379 int i = res->bch_low_interrupt;
380
381 for (; i <= res->bch_high_interrupt; i++)
382 free_irq(i, this);
383}
384
/*
 * dma_request_channel() filter: accept only APBH channels whose id lies
 * within the GPMI DMA-channel resource stashed in this->private by
 * acquire_dma_channels().
 */
static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
{
	struct gpmi_nand_data *this = param;
	struct resource *r = this->private;

	if (!mxs_dma_is_apbh(chan))
		return false;
	/*
	 * only catch the GPMI dma channels :
	 *	for mx23 :	MX23_DMA_GPMI0 ~ MX23_DMA_GPMI3
	 *		(These four channels share the same IRQ!)
	 *
	 *	for mx28 :	MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
	 *		(These eight channels share the same IRQ!)
	 */
	if (r->start <= chan->chan_id && chan->chan_id <= r->end) {
		/* Hand the channel its per-chip IRQ data before accepting. */
		chan->private = &this->dma_data;
		return true;
	}
	return false;
}
406
407static void release_dma_channels(struct gpmi_nand_data *this)
408{
409 unsigned int i;
410 for (i = 0; i < DMA_CHANS; i++)
411 if (this->dma_chans[i]) {
412 dma_release_channel(this->dma_chans[i]);
413 this->dma_chans[i] = NULL;
414 }
415}
416
/*
 * Acquire one DMA channel per chip (up to pdata->max_chip_count) from the
 * platform's GPMI DMA-channel resource, wiring up the matching DMA IRQ
 * for each.
 *
 * Returns 0 on success; -ENXIO when the resources are missing; -EINVAL
 * when a channel cannot be obtained (everything acquired so far is
 * released again).
 */
static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
{
	struct platform_device *pdev = this->pdev;
	struct gpmi_nand_platform_data *pdata = this->pdata;
	struct resources *res = &this->resources;
	struct resource *r, *r_dma;
	unsigned int i;

	r = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					GPMI_NAND_DMA_CHANNELS_RES_NAME);
	r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
					GPMI_NAND_DMA_INTERRUPT_RES_NAME);
	if (!r || !r_dma) {
		pr_err("Can't get resource for DMA\n");
		return -ENXIO;
	}

	/* used in gpmi_dma_filter() */
	this->private = r;

	for (i = r->start; i <= r->end; i++) {
		struct dma_chan *dma_chan;
		dma_cap_mask_t mask;

		/* Never acquire more channels than chips we can drive. */
		if (i - r->start >= pdata->max_chip_count)
			break;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* get the DMA interrupt */
		if (r_dma->start == r_dma->end) {
			/* All channels share one IRQ; only register the first. */
			if (i == r->start)
				this->dma_data.chan_irq = r_dma->start;
			else
				this->dma_data.chan_irq = NO_IRQ;
		} else
			this->dma_data.chan_irq = r_dma->start + (i - r->start);

		dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
		if (!dma_chan)
			goto acquire_err;

		/* fill the first empty item */
		this->dma_chans[i - r->start] = dma_chan;
	}

	/*
	 * NOTE(review): after the loop, i is one past the last channel
	 * actually acquired (or the break point), so dma_high_channel is
	 * exclusive while dma_low_channel is inclusive -- confirm users of
	 * these fields expect that.
	 */
	res->dma_low_channel = r->start;
	res->dma_high_channel = i;
	return 0;

acquire_err:
	pr_err("Can't acquire DMA channel %u\n", i);
	release_dma_channels(this);
	return -EINVAL;
}
474
/*
 * Acquire everything the driver needs: both register blocks, the BCH IRQ,
 * the DMA channels and the clock.  On any failure, everything acquired so
 * far is released again (release_register_block()/release_bch_irq() are
 * safe to call for partially-acquired state).
 */
static int __devinit acquire_resources(struct gpmi_nand_data *this)
{
	struct resources *res = &this->resources;
	int ret;

	ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_bch_irq(this, bch_irq);
	if (ret)
		goto exit_regs;

	ret = acquire_dma_channels(this);
	if (ret)
		goto exit_dma_channels;

	res->clock = clk_get(&this->pdev->dev, NULL);
	if (IS_ERR(res->clock)) {
		pr_err("can not get the clock\n");
		ret = -ENOENT;
		goto exit_clock;
	}
	return 0;

exit_clock:
	release_dma_channels(this);
exit_dma_channels:
	release_bch_irq(this);
exit_regs:
	release_register_block(this);
	return ret;
}
512
513static void release_resources(struct gpmi_nand_data *this)
514{
515 struct resources *r = &this->resources;
516
517 clk_put(r->clock);
518 release_register_block(this);
519 release_bch_irq(this);
520 release_dma_channels(this);
521}
522
/*
 * Bring the GPMI block up and install conservative initial timings.
 * Returns 0 on success or the error from gpmi_init().
 */
static int __devinit init_hardware(struct gpmi_nand_data *this)
{
	int ret;

	/*
	 * This structure contains the "safe" GPMI timing that should succeed
	 * with any NAND Flash device
	 * (although, with less-than-optimal performance).
	 */
	struct nand_timing  safe_timing = {
		.data_setup_in_ns        = 80,
		.data_hold_in_ns         = 60,
		.address_setup_in_ns     = 25,
		.gpmi_sample_delay_in_ns =  6,
		.tREA_in_ns              = -1,
		.tRLOH_in_ns             = -1,
		.tRHOH_in_ns             = -1,
	};

	/* Initialize the hardwares. */
	ret = gpmi_init(this);
	if (ret)
		return ret;

	this->timing = safe_timing;
	return 0;
}
550
551static int read_page_prepare(struct gpmi_nand_data *this,
552 void *destination, unsigned length,
553 void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
554 void **use_virt, dma_addr_t *use_phys)
555{
556 struct device *dev = this->dev;
557
558 if (virt_addr_valid(destination)) {
559 dma_addr_t dest_phys;
560
561 dest_phys = dma_map_single(dev, destination,
562 length, DMA_FROM_DEVICE);
563 if (dma_mapping_error(dev, dest_phys)) {
564 if (alt_size < length) {
565 pr_err("Alternate buffer is too small\n");
566 return -ENOMEM;
567 }
568 goto map_failed;
569 }
570 *use_virt = destination;
571 *use_phys = dest_phys;
572 this->direct_dma_map_ok = true;
573 return 0;
574 }
575
576map_failed:
577 *use_virt = alt_virt;
578 *use_phys = alt_phys;
579 this->direct_dma_map_ok = false;
580 return 0;
581}
582
583static inline void read_page_end(struct gpmi_nand_data *this,
584 void *destination, unsigned length,
585 void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
586 void *used_virt, dma_addr_t used_phys)
587{
588 if (this->direct_dma_map_ok)
589 dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
590}
591
592static inline void read_page_swap_end(struct gpmi_nand_data *this,
593 void *destination, unsigned length,
594 void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
595 void *used_virt, dma_addr_t used_phys)
596{
597 if (!this->direct_dma_map_ok)
598 memcpy(destination, alt_virt, length);
599}
600
601static int send_page_prepare(struct gpmi_nand_data *this,
602 const void *source, unsigned length,
603 void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
604 const void **use_virt, dma_addr_t *use_phys)
605{
606 struct device *dev = this->dev;
607
608 if (virt_addr_valid(source)) {
609 dma_addr_t source_phys;
610
611 source_phys = dma_map_single(dev, (void *)source, length,
612 DMA_TO_DEVICE);
613 if (dma_mapping_error(dev, source_phys)) {
614 if (alt_size < length) {
615 pr_err("Alternate buffer is too small\n");
616 return -ENOMEM;
617 }
618 goto map_failed;
619 }
620 *use_virt = source;
621 *use_phys = source_phys;
622 return 0;
623 }
624map_failed:
625 /*
626 * Copy the content of the source buffer into the alternate
627 * buffer and set up the return values accordingly.
628 */
629 memcpy(alt_virt, source, length);
630
631 *use_virt = alt_virt;
632 *use_phys = alt_phys;
633 return 0;
634}
635
636static void send_page_end(struct gpmi_nand_data *this,
637 const void *source, unsigned length,
638 void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
639 const void *used_virt, dma_addr_t used_phys)
640{
641 struct device *dev = this->dev;
642 if (used_virt == source)
643 dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
644}
645
/*
 * Free every buffer allocated by gpmi_alloc_dma_buffer().  Safe to call
 * on partially-allocated state (all pointers are NULL-checked or
 * kfree(NULL)-safe).
 *
 * NOTE(review): the page buffer is only freed when its pointer passes
 * virt_addr_valid(); confirm dma_alloc_coherent() memory always does on
 * the supported platforms, otherwise it would leak here.
 */
static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;

	if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
		dma_free_coherent(dev, this->page_buffer_size,
					this->page_buffer_virt,
					this->page_buffer_phys);
	kfree(this->cmd_buffer);
	kfree(this->data_buffer_dma);

	this->cmd_buffer	= NULL;
	this->data_buffer_dma	= NULL;
	this->page_buffer_virt	= NULL;
	this->page_buffer_size	=  0;
}
662
/*
 * Allocate the DMA buffers: the command buffer, the bounce data buffer,
 * and the coherent page buffer that is sliced into the payload and
 * auxiliary areas.  On any failure everything is freed again and -ENOMEM
 * is returned.
 */
static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct device *dev = this->dev;

	/* [1] Allocate a command buffer. PAGE_SIZE is enough. */
	this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA);
	if (this->cmd_buffer == NULL)
		goto error_alloc;

	/* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */
	this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA);
	if (this->data_buffer_dma == NULL)
		goto error_alloc;

	/*
	 * [3] Allocate the page buffer.
	 *
	 * Both the payload buffer and the auxiliary buffer must appear on
	 * 32-bit boundaries. We presume the size of the payload buffer is a
	 * power of two and is much larger than four, which guarantees the
	 * auxiliary buffer will appear on a 32-bit boundary.
	 */
	this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
	this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
					&this->page_buffer_phys, GFP_DMA);
	if (!this->page_buffer_virt)
		goto error_alloc;


	/* Slice up the page buffer. */
	this->payload_virt = this->page_buffer_virt;
	this->payload_phys = this->page_buffer_phys;
	this->auxiliary_virt = this->payload_virt + geo->payload_size;
	this->auxiliary_phys = this->payload_phys + geo->payload_size;
	return 0;

error_alloc:
	gpmi_free_dma_buffer(this);
	pr_err("allocate DMA buffer ret!!\n");
	return -ENOMEM;
}
706
/*
 * nand_chip cmd_ctrl hook: queue command/address bytes while either latch
 * enable is asserted, and fire the whole queue as one DMA operation when
 * both are deasserted.
 */
static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;
	struct gpmi_nand_data *this = chip->priv;
	int ret;

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will deassert
	 * both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes. NAND_CMD_NONE means the END of the queue.
	 */
	if ((ctrl & (NAND_ALE | NAND_CLE))) {
		if (data != NAND_CMD_NONE)
			this->cmd_buffer[this->command_length++] = data;
		return;
	}

	/* Both latches deasserted: nothing queued means nothing to send. */
	if (!this->command_length)
		return;

	ret = gpmi_send_command(this);
	if (ret)
		pr_err("Chip: %u, Error %d\n", this->current_chip, ret);

	/* The queue has been consumed; start a fresh one. */
	this->command_length = 0;
}
739
740static int gpmi_dev_ready(struct mtd_info *mtd)
741{
742 struct nand_chip *chip = mtd->priv;
743 struct gpmi_nand_data *this = chip->priv;
744
745 return gpmi_is_ready(this, this->current_chip);
746}
747
748static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
749{
750 struct nand_chip *chip = mtd->priv;
751 struct gpmi_nand_data *this = chip->priv;
752
753 if ((this->current_chip < 0) && (chipnr >= 0))
754 gpmi_begin(this);
755 else if ((this->current_chip >= 0) && (chipnr < 0))
756 gpmi_end(this);
757
758 this->current_chip = chipnr;
759}
760
761static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
762{
763 struct nand_chip *chip = mtd->priv;
764 struct gpmi_nand_data *this = chip->priv;
765
766 pr_debug("len is %d\n", len);
767 this->upper_buf = buf;
768 this->upper_len = len;
769
770 gpmi_read_data(this);
771}
772
773static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
774{
775 struct nand_chip *chip = mtd->priv;
776 struct gpmi_nand_data *this = chip->priv;
777
778 pr_debug("len is %d\n", len);
779 this->upper_buf = (uint8_t *)buf;
780 this->upper_len = len;
781
782 gpmi_send_data(this);
783}
784
785static uint8_t gpmi_read_byte(struct mtd_info *mtd)
786{
787 struct nand_chip *chip = mtd->priv;
788 struct gpmi_nand_data *this = chip->priv;
789 uint8_t *buf = this->data_buffer_dma;
790
791 gpmi_read_buf(mtd, buf, 1);
792 return buf[0];
793}
794
/*
 * Handles block mark swapping.
 * It can be called when swapping the block mark, or swapping it back,
 * because the operations are the same (the swap is its own inverse).
 * Does nothing unless this->swap_block_mark is set.
 */
static void block_mark_swapping(struct gpmi_nand_data *this,
				void *payload, void *auxiliary)
{
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	unsigned char *p;
	unsigned char *a;
	unsigned int  bit;
	unsigned char mask;
	unsigned char from_data;
	unsigned char from_oob;

	if (!this->swap_block_mark)
		return;

	/*
	 * If control arrives here, we're swapping. Make some convenience
	 * variables.
	 */
	bit = nfc_geo->block_mark_bit_offset;
	p   = payload + nfc_geo->block_mark_byte_offset;
	a   = auxiliary;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	from_data = (p[0] >> bit) | (p[1] << (8 - bit));

	/* Get the byte from the OOB. */
	from_oob = a[0];

	/* Swap them. */
	a[0] = from_data;

	/* Splice from_oob back into the two data bytes, around bit. */
	mask = (0x1 << bit) - 1;
	p[0] = (p[0] & mask) | (from_oob << bit);

	mask = ~0 << bit;
	p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}
842
/*
 * ecc.read_page hook: read one page through the BCH engine.
 *
 * The payload DMA target is the caller's buffer when it can be mapped
 * directly (read_page_prepare()); otherwise the driver's bounce buffer
 * is used and copied back in read_page_swap_end().  Per-chunk ECC status
 * from the auxiliary buffer is folded into mtd->ecc_stats.
 *
 * Returns 0 on success or a negative error code.
 */
static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int page)
{
	struct gpmi_nand_data *this = chip->priv;
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	void          *payload_virt;
	dma_addr_t    payload_phys;
	void          *auxiliary_virt;
	dma_addr_t    auxiliary_phys;
	unsigned int  i;
	unsigned char *status;
	unsigned int  failed;
	unsigned int  corrected;
	int           ret;

	pr_debug("page number is : %d\n", page);
	/* Pick the DMA target: the caller's buffer directly, or our own. */
	ret = read_page_prepare(this, buf, mtd->writesize,
					this->payload_virt, this->payload_phys,
					nfc_geo->payload_size,
					&payload_virt, &payload_phys);
	if (ret) {
		pr_err("Inadequate DMA buffer\n");
		ret = -ENOMEM;
		return ret;
	}
	auxiliary_virt = this->auxiliary_virt;
	auxiliary_phys = this->auxiliary_phys;

	/* go! */
	ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
	read_page_end(this, buf, mtd->writesize,
			this->payload_virt, this->payload_phys,
			nfc_geo->payload_size,
			payload_virt, payload_phys);
	if (ret) {
		pr_err("Error in ECC-based read: %d\n", ret);
		goto exit_nfc;
	}

	/* handle the block mark swapping (no-op unless swap_block_mark) */
	block_mark_swapping(this, payload_virt, auxiliary_virt);

	/*
	 * Loop over status bytes, accumulating ECC status.  Any value other
	 * than GOOD, ERASED or UNCORRECTABLE is added to the corrected count.
	 */
	failed		= 0;
	corrected	= 0;
	status		= auxiliary_virt + nfc_geo->auxiliary_status_offset;

	for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
			continue;

		if (*status == STATUS_UNCORRECTABLE) {
			failed++;
			continue;
		}
		corrected += *status;
	}

	/*
	 * Propagate ECC status to the owning MTD only when failed or
	 * corrected times nearly reaches our ECC correction threshold.
	 */
	if (failed || corrected >= (nfc_geo->ecc_strength - 1)) {
		mtd->ecc_stats.failed    += failed;
		mtd->ecc_stats.corrected += corrected;
	}

	/*
	 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(chip->oob_poi, ~0, mtd->oobsize);
	chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];

	/* Copy a bounced read up into the caller's buffer, if needed. */
	read_page_swap_end(this, buf, mtd->writesize,
			this->payload_virt, this->payload_phys,
			nfc_geo->payload_size,
			payload_virt, payload_phys);
exit_nfc:
	return ret;
}
929
/*
 * ecc.write_page hook: write one page through the BCH engine.
 *
 * With block mark swapping the caller's buffers must not be modified, so
 * they are copied into the driver's own buffers and swapped there.
 * Without swapping, send_page_prepare() tries to DMA the caller's buffers
 * directly.  Note the exit_auxiliary label lives inside the
 * !swap_block_mark branch: the goto from the failed auxiliary prepare
 * lands there to unmap only the payload, which was already prepared.
 */
static void gpmi_ecc_write_page(struct mtd_info *mtd,
				struct nand_chip *chip, const uint8_t *buf)
{
	struct gpmi_nand_data *this = chip->priv;
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	const void *payload_virt;
	dma_addr_t payload_phys;
	const void *auxiliary_virt;
	dma_addr_t auxiliary_phys;
	int        ret;

	pr_debug("ecc write page.\n");
	if (this->swap_block_mark) {
		/*
		 * If control arrives here, we're doing block mark swapping.
		 * Since we can't modify the caller's buffers, we must copy them
		 * into our own.
		 */
		memcpy(this->payload_virt, buf, mtd->writesize);
		payload_virt = this->payload_virt;
		payload_phys = this->payload_phys;

		memcpy(this->auxiliary_virt, chip->oob_poi,
				nfc_geo->auxiliary_size);
		auxiliary_virt = this->auxiliary_virt;
		auxiliary_phys = this->auxiliary_phys;

		/* Handle block mark swapping. */
		block_mark_swapping(this,
				(void *) payload_virt, (void *) auxiliary_virt);
	} else {
		/*
		 * If control arrives here, we're not doing block mark swapping,
		 * so we can try to use the caller's buffers.
		 */
		ret = send_page_prepare(this,
				buf, mtd->writesize,
				this->payload_virt, this->payload_phys,
				nfc_geo->payload_size,
				&payload_virt, &payload_phys);
		if (ret) {
			pr_err("Inadequate payload DMA buffer\n");
			return;
		}

		ret = send_page_prepare(this,
				chip->oob_poi, mtd->oobsize,
				this->auxiliary_virt, this->auxiliary_phys,
				nfc_geo->auxiliary_size,
				&auxiliary_virt, &auxiliary_phys);
		if (ret) {
			pr_err("Inadequate auxiliary DMA buffer\n");
			goto exit_auxiliary;
		}
	}

	/* Ask the NFC. */
	ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
	if (ret)
		pr_err("Error in ECC-based write: %d\n", ret);

	/* Direct mappings (non-swap path only) must be torn down again. */
	if (!this->swap_block_mark) {
		send_page_end(this, chip->oob_poi, mtd->oobsize,
				this->auxiliary_virt, this->auxiliary_phys,
				nfc_geo->auxiliary_size,
				auxiliary_virt, auxiliary_phys);
exit_auxiliary:
		send_page_end(this, buf, mtd->writesize,
				this->payload_virt, this->payload_phys,
				nfc_geo->payload_size,
				payload_virt, payload_phys);
	}
}
1003
/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 *
 * What we do for this specific read operation depends on two questions:
 *
 * 1) Are we doing a "raw" read, or an ECC-based read?
 *
 * 2) Are we using block mark swapping or transcription?
 *
 * There are four cases, illustrated by the following Karnaugh map:
 *
 *                    |           Raw           |         ECC-based       |
 *       -------------+-------------------------+-------------------------+
 *                    | Read the conventional   |                         |
 *                    | OOB at the end of the   |                         |
 *          Swapping  | page and return it. It  |                         |
 *                    | contains exactly what   |                         |
 *                    | we want.                | Read the block mark and |
 *       -------------+-------------------------+ return it in a buffer   |
 *                    | Read the conventional   | full of set bits.       |
 *                    | OOB at the end of the   |                         |
 *                    | page and also the block |                         |
 *       Transcribing | mark in the metadata.   |                         |
 *                    | Copy the block mark     |                         |
 *                    | into the first byte of  |                         |
 *                    | the OOB.                |                         |
 *       -------------+-------------------------+-------------------------+
 *
 * Note that we break rule #4 in the Transcribing/Raw case because we're not
 * giving an accurate view of the actual, physical bytes in the page (we're
 * overwriting the block mark). That's OK because it's more important to follow
 * rule #2.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 *
 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
 * caller wants an ECC-based or raw view of the page is not propagated down to
 * this driver.
 */
static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page, int sndcmd)
{
	struct gpmi_nand_data *this = chip->priv;

	pr_debug("page number is %d\n", page);
	/* clear the OOB buffer */
	memset(chip->oob_poi, ~0, mtd->oobsize);

	/* Read out the conventional OOB. */
	chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	/*
	 * Now, we want to make sure the block mark is correct. In the
	 * Swapping/Raw case, we already have it. Otherwise, we need to
	 * explicitly read it.
	 */
	if (!this->swap_block_mark) {
		/* Read the block mark into the first byte of the OOB buffer. */
		chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
		chip->oob_poi[0] = chip->read_byte(mtd);
	}

	/*
	 * Return true, indicating that the next call to this function must send
	 * a command.
	 */
	return true;
}
1099
1100static int
1101gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
1102{
1103 /*
1104 * The BCH will use all the (page + oob).
1105 * Our gpmi_hw_ecclayout can only prohibit the JFFS2 to write the oob.
1106 * But it can not stop some ioctls such MEMWRITEOOB which uses
1107 * MTD_OPS_PLACE_OOB. So We have to implement this function to prohibit
1108 * these ioctls too.
1109 */
1110 return -EPERM;
1111}
1112
/*
 * gpmi_block_markbad - Mark the block containing @ofs as bad.
 * @mtd: The owning MTD.
 * @ofs: Byte offset within the block to mark.
 *
 * Updates the in-memory bad block table (if one exists), then either
 * updates the flash-based BBT or programs a zero marker byte directly
 * onto the medium. Returns 0 on success, -EIO if the program operation
 * reports failure, or the error from nand_update_bbt().
 */
static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd->priv;
	struct gpmi_nand_data *this = chip->priv;
	int block, ret = 0;
	uint8_t *block_mark;
	int column, page, status, chipnr;

	/* Get block number */
	block = (int)(ofs >> chip->bbt_erase_shift);
	/* Record the block as bad in the in-memory BBT (2 bits per block). */
	if (chip->bbt)
		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);

	/* Do we have a flash based bad block table ? */
	if (chip->options & NAND_BBT_USE_FLASH)
		ret = nand_update_bbt(mtd, ofs);
	else {
		chipnr = (int)(ofs >> chip->chip_shift);
		chip->select_chip(mtd, chipnr);

		/*
		 * With block mark swapping, the physical mark is written at
		 * the conventional OOB offset; in transcription mode it goes
		 * into the first byte of the page (column 0).
		 */
		column = this->swap_block_mark ? mtd->writesize : 0;

		/* Write the block mark. */
		block_mark = this->data_buffer_dma;
		block_mark[0] = 0; /* bad block marker */

		/* Shift to get page */
		page = (int)(ofs >> chip->page_shift);

		chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
		chip->write_buf(mtd, block_mark, 1);
		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

		/* Wait for the program operation and check its status. */
		status = chip->waitfunc(mtd, chip);
		if (status & NAND_STATUS_FAIL)
			ret = -EIO;

		chip->select_chip(mtd, -1);
	}
	/* Account the newly marked block in the ECC statistics. */
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
1157
1158static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this)
1159{
1160 struct boot_rom_geometry *geometry = &this->rom_geometry;
1161
1162 /*
1163 * Set the boot block stride size.
1164 *
1165 * In principle, we should be reading this from the OTP bits, since
1166 * that's where the ROM is going to get it. In fact, we don't have any
1167 * way to read the OTP bits, so we go with the default and hope for the
1168 * best.
1169 */
1170 geometry->stride_size_in_pages = 64;
1171
1172 /*
1173 * Set the search area stride exponent.
1174 *
1175 * In principle, we should be reading this from the OTP bits, since
1176 * that's where the ROM is going to get it. In fact, we don't have any
1177 * way to read the OTP bits, so we go with the default and hope for the
1178 * best.
1179 */
1180 geometry->search_area_stride_exponent = 2;
1181 return 0;
1182}
1183
/* The 4-byte signature written at byte 12 of an NCB page. */
static const char *fingerprint = "STMP";

/*
 * mx23_check_transcription_stamp - Scan for an NCB fingerprint.
 *
 * Walks the first search area on chip 0, reading the fingerprint bytes
 * from the first page of each stride. Restores the previously selected
 * chip before returning. Returns true if a fingerprint was found (the
 * medium has already been transcribed), false otherwise.
 */
static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this)
{
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct device *dev = this->dev;
	struct mtd_info *mtd = &this->mtd;
	struct nand_chip *chip = &this->nand;
	unsigned int search_area_size_in_strides;
	unsigned int stride;
	unsigned int page;
	loff_t byte;
	uint8_t *buffer = chip->buffers->databuf;
	int saved_chip_number;
	int found_an_ncb_fingerprint = false;

	/* Compute the number of strides in a search area. */
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;

	/* The Boot ROM material always lives on chip 0. */
	saved_chip_number = this->current_chip;
	chip->select_chip(mtd, 0);

	/*
	 * Loop through the first search area, looking for the NCB fingerprint.
	 */
	dev_dbg(dev, "Scanning for an NCB fingerprint...\n");

	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page and byte addresses. */
		page = stride * rom_geo->stride_size_in_pages;
		byte = page * mtd->writesize;

		dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);

		/*
		 * Read the NCB fingerprint. The fingerprint is four bytes long
		 * and starts in the 12th byte of the page.
		 */
		chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
		chip->read_buf(mtd, buffer, strlen(fingerprint));

		/* Look for the fingerprint. */
		if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
			found_an_ncb_fingerprint = true;
			break;
		}

	}

	/* Put the originally selected chip back. */
	chip->select_chip(mtd, saved_chip_number);

	if (found_an_ncb_fingerprint)
		dev_dbg(dev, "\tFound a fingerprint\n");
	else
		dev_dbg(dev, "\tNo fingerprint found\n");
	return found_an_ncb_fingerprint;
}
1240
/*
 * mx23_write_transcription_stamp - Write a transcription stamp.
 *
 * Erases the blocks covering the first search area on chip 0, then
 * writes the NCB fingerprint ("STMP" at byte 12) into the first page of
 * every stride, marking the medium as transcribed. Erase and write
 * failures are logged but not treated as fatal. The previously selected
 * chip is restored before returning 0.
 */
static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct mtd_info *mtd = &this->mtd;
	struct nand_chip *chip = &this->nand;
	unsigned int block_size_in_pages;
	unsigned int search_area_size_in_strides;
	unsigned int search_area_size_in_pages;
	unsigned int search_area_size_in_blocks;
	unsigned int block;
	unsigned int stride;
	unsigned int page;
	loff_t byte;
	uint8_t *buffer = chip->buffers->databuf;
	int saved_chip_number;
	int status;

	/* Compute the search area geometry. */
	block_size_in_pages = mtd->erasesize / mtd->writesize;
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
	search_area_size_in_pages = search_area_size_in_strides *
					rom_geo->stride_size_in_pages;
	/* Round up to whole erase blocks. */
	search_area_size_in_blocks =
		(search_area_size_in_pages + (block_size_in_pages - 1)) /
					block_size_in_pages;

	dev_dbg(dev, "Search Area Geometry :\n");
	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
	dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);

	/* Select chip 0. */
	saved_chip_number = this->current_chip;
	chip->select_chip(mtd, 0);

	/* Loop over blocks in the first search area, erasing them. */
	dev_dbg(dev, "Erasing the search area...\n");

	for (block = 0; block < search_area_size_in_blocks; block++) {
		/* Compute the page address. */
		page = block * block_size_in_pages;

		/* Erase this block. */
		dev_dbg(dev, "\tErasing block 0x%x\n", block);
		chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
		chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);

		/* Wait for the erase to finish. */
		status = chip->waitfunc(mtd, chip);
		if (status & NAND_STATUS_FAIL)
			dev_err(dev, "[%s] Erase failed.\n", __func__);
	}

	/* Write the NCB fingerprint into the page buffer. */
	memset(buffer, ~0, mtd->writesize);
	memset(chip->oob_poi, ~0, mtd->oobsize);
	memcpy(buffer + 12, fingerprint, strlen(fingerprint));

	/* Loop through the first search area, writing NCB fingerprints. */
	dev_dbg(dev, "Writing NCB fingerprints...\n");
	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page and byte addresses. */
		page = stride * rom_geo->stride_size_in_pages;
		byte = page * mtd->writesize;

		/* Write the first page of the current stride. */
		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
		chip->ecc.write_page_raw(mtd, chip, buffer);
		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

		/* Wait for the write to finish. */
		status = chip->waitfunc(mtd, chip);
		if (status & NAND_STATUS_FAIL)
			dev_err(dev, "[%s] Write failed.\n", __func__);
	}

	/* Deselect chip 0. */
	chip->select_chip(mtd, saved_chip_number);
	return 0;
}
1324
/*
 * mx23_boot_init - Transcribe bad block marks on the MX23.
 *
 * The MX23 cannot use block mark swapping, so the driver transcribes
 * the conventional bad block marks into their new location instead.
 * If the medium already carries a transcription stamp, nothing needs
 * to be done. Otherwise every block's conventional mark is read and,
 * for bad blocks, rewritten via chip->block_markbad() (which targets
 * the transcribed location), and finally the stamp is written so this
 * work happens only once. Always returns 0.
 */
static int __devinit mx23_boot_init(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = &this->mtd;
	unsigned int block_count;
	unsigned int block;
	int     chipnr;
	int     page;
	loff_t  byte;
	uint8_t block_mark;
	int     ret = 0;

	/*
	 * If control arrives here, we can't use block mark swapping, which
	 * means we're forced to use transcription. First, scan for the
	 * transcription stamp. If we find it, then we don't have to do
	 * anything -- the block marks are already transcribed.
	 */
	if (mx23_check_transcription_stamp(this))
		return 0;

	/*
	 * If control arrives here, we couldn't find a transcription stamp,
	 * so we presume the block marks are in the conventional location.
	 */
	dev_dbg(dev, "Transcribing bad block marks...\n");

	/* Compute the number of blocks in the entire medium. */
	block_count = chip->chipsize >> chip->phys_erase_shift;

	/*
	 * Loop over all the blocks in the medium, transcribing block marks as
	 * we go.
	 */
	for (block = 0; block < block_count; block++) {
		/*
		 * Compute the chip, page and byte addresses for this block's
		 * conventional mark.
		 */
		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
		page = block << (chip->phys_erase_shift - chip->page_shift);
		byte = block <<  chip->phys_erase_shift;

		/* Send the command to read the conventional block mark. */
		chip->select_chip(mtd, chipnr);
		chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		block_mark = chip->read_byte(mtd);
		chip->select_chip(mtd, -1);

		/*
		 * Check if the block is marked bad. If so, we need to mark it
		 * again, but this time the result will be a mark in the
		 * location where we transcribe block marks.
		 */
		if (block_mark != 0xff) {
			dev_dbg(dev, "Transcribing mark in block %u\n", block);
			ret = chip->block_markbad(mtd, byte);
			if (ret)
				dev_err(dev, "Failed to mark block bad with "
							"ret %d\n", ret);
		}
	}

	/* Write the stamp that indicates we've transcribed the block marks. */
	mx23_write_transcription_stamp(this);
	return 0;
}
1393
1394static int __devinit nand_boot_init(struct gpmi_nand_data *this)
1395{
1396 nand_boot_set_geometry(this);
1397
1398 /* This is ROM arch-specific initilization before the BBT scanning. */
1399 if (GPMI_IS_MX23(this))
1400 return mx23_boot_init(this);
1401 return 0;
1402}
1403
1404static int __devinit gpmi_set_geometry(struct gpmi_nand_data *this)
1405{
1406 int ret;
1407
1408 /* Free the temporary DMA memory for reading ID. */
1409 gpmi_free_dma_buffer(this);
1410
1411 /* Set up the NFC geometry which is used by BCH. */
1412 ret = bch_set_geometry(this);
1413 if (ret) {
1414 pr_err("set geometry ret : %d\n", ret);
1415 return ret;
1416 }
1417
1418 /* Alloc the new DMA buffers according to the pagesize and oobsize */
1419 return gpmi_alloc_dma_buffer(this);
1420}
1421
1422static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
1423{
1424 int ret;
1425
1426 /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
1427 if (GPMI_IS_MX23(this))
1428 this->swap_block_mark = false;
1429 else
1430 this->swap_block_mark = true;
1431
1432 /* Set up the medium geometry */
1433 ret = gpmi_set_geometry(this);
1434 if (ret)
1435 return ret;
1436
1437 /* NAND boot init, depends on the gpmi_set_geometry(). */
1438 return nand_boot_init(this);
1439}
1440
1441static int gpmi_scan_bbt(struct mtd_info *mtd)
1442{
1443 struct nand_chip *chip = mtd->priv;
1444 struct gpmi_nand_data *this = chip->priv;
1445 int ret;
1446
1447 /* Prepare for the BBT scan. */
1448 ret = gpmi_pre_bbt_scan(this);
1449 if (ret)
1450 return ret;
1451
1452 /* use the default BBT implementation */
1453 return nand_default_bbt(mtd);
1454}
1455
/*
 * gpmi_nfc_exit - Tear down what gpmi_nfc_init() built.
 *
 * Unregisters and releases the MTD/NAND device, then frees the DMA
 * buffers.
 */
void gpmi_nfc_exit(struct gpmi_nand_data *this)
{
	nand_release(&this->mtd);
	gpmi_free_dma_buffer(this);
}
1461
/*
 * gpmi_nfc_init - Set up the MTD/NAND stack for this controller.
 *
 * Fills in the mtd_info and nand_chip operations, allocates a small
 * temporary DMA buffer so nand_scan() can read the chip ID, scans the
 * bus, and registers the resulting MTD device (parsing partitions from
 * platform data). On any failure, everything already set up is torn
 * down via gpmi_nfc_exit().
 */
static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
{
	struct gpmi_nand_platform_data *pdata = this->pdata;
	struct mtd_info  *mtd = &this->mtd;
	struct nand_chip *chip = &this->nand;
	int ret;

	/* init current chip: -1 means no chip currently selected */
	this->current_chip	= -1;

	/* init the MTD data structures */
	mtd->priv		= chip;
	mtd->name		= "gpmi-nand";
	mtd->owner		= THIS_MODULE;

	/* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
	chip->priv		= this;
	chip->select_chip	= gpmi_select_chip;
	chip->cmd_ctrl		= gpmi_cmd_ctrl;
	chip->dev_ready		= gpmi_dev_ready;
	chip->read_byte		= gpmi_read_byte;
	chip->read_buf		= gpmi_read_buf;
	chip->write_buf		= gpmi_write_buf;
	chip->ecc.read_page	= gpmi_ecc_read_page;
	chip->ecc.write_page	= gpmi_ecc_write_page;
	chip->ecc.read_oob	= gpmi_ecc_read_oob;
	chip->ecc.write_oob	= gpmi_ecc_write_oob;
	chip->scan_bbt		= gpmi_scan_bbt;
	chip->badblock_pattern	= &gpmi_bbt_descr;
	chip->block_markbad	= gpmi_block_markbad;
	chip->options		|= NAND_NO_SUBPAGE_WRITE;
	chip->ecc.mode		= NAND_ECC_HW;
	/* NOTE(review): ecc.size looks like a placeholder; the real geometry
	 * is computed later by bch_set_geometry() — confirm. */
	chip->ecc.size		= 1;
	chip->ecc.layout	= &gpmi_hw_ecclayout;

	/* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
	this->bch_geometry.payload_size = 1024;
	this->bch_geometry.auxiliary_size = 128;
	ret = gpmi_alloc_dma_buffer(this);
	if (ret)
		goto err_out;

	ret = nand_scan(mtd, pdata->max_chip_count);
	if (ret) {
		pr_err("Chip scan failed\n");
		goto err_out;
	}

	/* Register the device, parsing partitions from platform data. */
	ret = mtd_device_parse_register(mtd, NULL, NULL,
			pdata->partitions, pdata->partition_count);
	if (ret)
		goto err_out;
	return 0;

err_out:
	gpmi_nfc_exit(this);
	return ret;
}
1520
1521static int __devinit gpmi_nand_probe(struct platform_device *pdev)
1522{
1523 struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data;
1524 struct gpmi_nand_data *this;
1525 int ret;
1526
1527 this = kzalloc(sizeof(*this), GFP_KERNEL);
1528 if (!this) {
1529 pr_err("Failed to allocate per-device memory\n");
1530 return -ENOMEM;
1531 }
1532
1533 platform_set_drvdata(pdev, this);
1534 this->pdev = pdev;
1535 this->dev = &pdev->dev;
1536 this->pdata = pdata;
1537
1538 if (pdata->platform_init) {
1539 ret = pdata->platform_init();
1540 if (ret)
1541 goto platform_init_error;
1542 }
1543
1544 ret = acquire_resources(this);
1545 if (ret)
1546 goto exit_acquire_resources;
1547
1548 ret = init_hardware(this);
1549 if (ret)
1550 goto exit_nfc_init;
1551
1552 ret = gpmi_nfc_init(this);
1553 if (ret)
1554 goto exit_nfc_init;
1555
1556 return 0;
1557
1558exit_nfc_init:
1559 release_resources(this);
1560platform_init_error:
1561exit_acquire_resources:
1562 platform_set_drvdata(pdev, NULL);
1563 kfree(this);
1564 return ret;
1565}
1566
/*
 * gpmi_nand_remove - Inverse of gpmi_nand_probe().
 *
 * Tears down the MTD/NAND stack and DMA buffers, releases the hardware
 * resources, clears the drvdata pointer and frees the per-device state.
 */
static int __exit gpmi_nand_remove(struct platform_device *pdev)
{
	struct gpmi_nand_data *this = platform_get_drvdata(pdev);

	gpmi_nfc_exit(this);
	release_resources(this);
	platform_set_drvdata(pdev, NULL);
	kfree(this);
	return 0;
}
1577
/*
 * Platform-device match table. The driver_data value identifies the SoC
 * variant; GPMI_IS_MX23()/GPMI_IS_MX28() test it at run time.
 */
static const struct platform_device_id gpmi_ids[] = {
	{
		.name = "imx23-gpmi-nand",
		.driver_data = IS_MX23,
	}, {
		.name = "imx28-gpmi-nand",
		.driver_data = IS_MX28,
	}, {},
};
1587
/* Platform driver glue; .remove is stripped when the driver is built in. */
static struct platform_driver gpmi_nand_driver = {
	.driver = {
		.name = "gpmi-nand",
	},
	.probe   = gpmi_nand_probe,
	.remove  = __exit_p(gpmi_nand_remove),
	.id_table = gpmi_ids,
};
1596
1597static int __init gpmi_nand_init(void)
1598{
1599 int err;
1600
1601 err = platform_driver_register(&gpmi_nand_driver);
1602 if (err == 0)
1603 printk(KERN_INFO "GPMI NAND driver registered. (IMX)\n");
1604 else
1605 pr_err("i.MX GPMI NAND driver registration failed\n");
1606 return err;
1607}
1608
/* Module exit point: unregister the platform driver. */
static void __exit gpmi_nand_exit(void)
{
	platform_driver_unregister(&gpmi_nand_driver);
}
1613
1614module_init(gpmi_nand_init);
1615module_exit(gpmi_nand_exit);
1616
1617MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1618MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
1619MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
new file mode 100644
index 000000000000..e023bccb7781
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -0,0 +1,273 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
18#define __DRIVERS_MTD_NAND_GPMI_NAND_H
19
20#include <linux/mtd/nand.h>
21#include <linux/platform_device.h>
22#include <linux/dma-mapping.h>
23#include <mach/dma.h>
24
/**
 * struct resources - Hardware resources held by the driver.
 * @gpmi_regs:          The GPMI register block.
 * @bch_regs:           The BCH register block.
 * @bch_low_interrupt:  The lower BCH interrupt number.
 * @bch_high_interrupt: The upper BCH interrupt number.
 * @dma_low_channel:    The lowest DMA channel used by this device.
 * @dma_high_channel:   The highest DMA channel used by this device.
 * @clock:              The device clock.
 */
struct resources {
	void          *gpmi_regs;
	void          *bch_regs;
	unsigned int  bch_low_interrupt;
	unsigned int  bch_high_interrupt;
	unsigned int  dma_low_channel;
	unsigned int  dma_high_channel;
	struct clk    *clock;
};
34
/**
 * struct bch_geometry - BCH geometry description.
 * @gf_len:                   The length of Galois Field. (e.g., 13 or 14)
 * @ecc_strength:             A number that describes the strength of the ECC
 *                            algorithm.
 * @page_size:                The size, in bytes, of a physical page, including
 *                            both data and OOB.
 * @metadata_size:            The size, in bytes, of the metadata.
 * @ecc_chunk_size:           The size, in bytes, of a single ECC chunk. Note
 *                            the first chunk in the page includes both data and
 *                            metadata, so it's a bit larger than this value.
 * @ecc_chunk_count:          The number of ECC chunks in the page.
 * @payload_size:             The size, in bytes, of the payload buffer.
 * @auxiliary_size:           The size, in bytes, of the auxiliary buffer.
 * @auxiliary_status_offset:  The offset into the auxiliary buffer at which
 *                            the ECC status appears.
 * @block_mark_byte_offset:   The byte offset in the ECC-based page view at
 *                            which the underlying physical block mark appears.
 * @block_mark_bit_offset:    The bit offset into the ECC-based page view at
 *                            which the underlying physical block mark appears.
 */
struct bch_geometry {
	unsigned int  gf_len;
	unsigned int  ecc_strength;
	unsigned int  page_size;
	unsigned int  metadata_size;
	unsigned int  ecc_chunk_size;
	unsigned int  ecc_chunk_count;
	unsigned int  payload_size;
	unsigned int  auxiliary_size;
	unsigned int  auxiliary_status_offset;
	unsigned int  block_mark_byte_offset;
	unsigned int  block_mark_bit_offset;
};
69
70/**
71 * struct boot_rom_geometry - Boot ROM geometry description.
72 * @stride_size_in_pages: The size of a boot block stride, in pages.
73 * @search_area_stride_exponent: The logarithm to base 2 of the size of a
74 * search area in boot block strides.
75 */
76struct boot_rom_geometry {
77 unsigned int stride_size_in_pages;
78 unsigned int search_area_stride_exponent;
79};
80
81/* DMA operations types */
82enum dma_ops_type {
83 DMA_FOR_COMMAND = 1,
84 DMA_FOR_READ_DATA,
85 DMA_FOR_WRITE_DATA,
86 DMA_FOR_READ_ECC_PAGE,
87 DMA_FOR_WRITE_ECC_PAGE
88};
89
90/**
91 * struct nand_timing - Fundamental timing attributes for NAND.
92 * @data_setup_in_ns: The data setup time, in nanoseconds. Usually the
93 * maximum of tDS and tWP. A negative value
94 * indicates this characteristic isn't known.
95 * @data_hold_in_ns: The data hold time, in nanoseconds. Usually the
96 * maximum of tDH, tWH and tREH. A negative value
97 * indicates this characteristic isn't known.
98 * @address_setup_in_ns: The address setup time, in nanoseconds. Usually
99 * the maximum of tCLS, tCS and tALS. A negative
100 * value indicates this characteristic isn't known.
101 * @gpmi_sample_delay_in_ns: A GPMI-specific timing parameter. A negative value
102 * indicates this characteristic isn't known.
103 * @tREA_in_ns: tREA, in nanoseconds, from the data sheet. A
104 * negative value indicates this characteristic isn't
105 * known.
106 * @tRLOH_in_ns: tRLOH, in nanoseconds, from the data sheet. A
107 * negative value indicates this characteristic isn't
108 * known.
109 * @tRHOH_in_ns: tRHOH, in nanoseconds, from the data sheet. A
110 * negative value indicates this characteristic isn't
111 * known.
112 */
113struct nand_timing {
114 int8_t data_setup_in_ns;
115 int8_t data_hold_in_ns;
116 int8_t address_setup_in_ns;
117 int8_t gpmi_sample_delay_in_ns;
118 int8_t tREA_in_ns;
119 int8_t tRLOH_in_ns;
120 int8_t tRHOH_in_ns;
121};
122
/*
 * struct gpmi_nand_data - Per-device driver state.
 *
 * One instance is allocated in gpmi_nand_probe() and stored as the
 * platform device's drvdata.
 */
struct gpmi_nand_data {
	/* System Interface */
	struct device		*dev;
	struct platform_device	*pdev;
	struct gpmi_nand_platform_data	*pdata;

	/* Resources */
	struct resources	resources;

	/* Flash Hardware */
	struct nand_timing	timing;

	/* BCH */
	struct bch_geometry	bch_geometry;
	struct completion	bch_done;

	/* NAND Boot issue */
	/* swap_block_mark: false on MX23 (transcription used instead). */
	bool			swap_block_mark;
	struct boot_rom_geometry rom_geometry;

	/* MTD / NAND */
	struct nand_chip	nand;
	struct mtd_info		mtd;

	/* General-use Variables */
	/* current_chip: selected chip number; -1 when none is selected. */
	int			current_chip;
	unsigned int		command_length;

	/* passed from upper layer */
	uint8_t			*upper_buf;
	int			upper_len;

	/* for DMA operations */
	bool			direct_dma_map_ok;

	struct scatterlist	cmd_sgl;
	char			*cmd_buffer;

	struct scatterlist	data_sgl;
	char			*data_buffer_dma;

	void			*page_buffer_virt;
	dma_addr_t		page_buffer_phys;
	unsigned int		page_buffer_size;

	void			*payload_virt;
	dma_addr_t		payload_phys;

	void			*auxiliary_virt;
	dma_addr_t		auxiliary_phys;

	/* DMA channels */
#define DMA_CHANS		8
	struct dma_chan		*dma_chans[DMA_CHANS];
	struct mxs_dma_data	dma_data;
	enum dma_ops_type	last_dma_type;
	enum dma_ops_type	dma_type;
	struct completion	dma_done;

	/* private */
	void			*private;
};
185
186/**
187 * struct gpmi_nfc_hardware_timing - GPMI hardware timing parameters.
188 * @data_setup_in_cycles: The data setup time, in cycles.
189 * @data_hold_in_cycles: The data hold time, in cycles.
190 * @address_setup_in_cycles: The address setup time, in cycles.
191 * @use_half_periods: Indicates the clock is running slowly, so the
192 * NFC DLL should use half-periods.
193 * @sample_delay_factor: The sample delay factor.
194 */
195struct gpmi_nfc_hardware_timing {
196 uint8_t data_setup_in_cycles;
197 uint8_t data_hold_in_cycles;
198 uint8_t address_setup_in_cycles;
199 bool use_half_periods;
200 uint8_t sample_delay_factor;
201};
202
/**
 * struct timing_threshod - Timing threshold
 *
 * NOTE(review): the tag "threshod" is a typo for "threshold"; it is
 * part of the public name, so it is only noted here rather than fixed.
 *
 * @max_chip_count:             The maximum chip count. (Presumably the
 *                              number of chips the timing code supports;
 *                              undocumented in the original — confirm.)
 * @max_data_setup_cycles:      The maximum number of data setup cycles that
 *                              can be expressed in the hardware.
 * @internal_data_setup_in_ns:  The time, in ns, that the NFC hardware requires
 *                              for data read internal setup. In the Reference
 *                              Manual, see the chapter "High-Speed NAND
 *                              Timing" for more details.
 * @max_sample_delay_factor:    The maximum sample delay factor that can be
 *                              expressed in the hardware.
 * @max_dll_clock_period_in_ns: The maximum period of the GPMI clock that the
 *                              sample delay DLL hardware can possibly work
 *                              with (the DLL is unusable with longer periods).
 *                              If the full-cycle period is greater than HALF
 *                              this value, the DLL must be configured to use
 *                              half-periods.
 * @max_dll_delay_in_ns:        The maximum amount of delay, in ns, that the
 *                              DLL can implement.
 * @clock_frequency_in_hz:      The clock frequency, in Hz, during the current
 *                              I/O transaction. If no I/O transaction is in
 *                              progress, this is the clock frequency during
 *                              the most recent I/O transaction.
 */
struct timing_threshod {
	const unsigned int      max_chip_count;
	const unsigned int      max_data_setup_cycles;
	const unsigned int      internal_data_setup_in_ns;
	const unsigned int      max_sample_delay_factor;
	const unsigned int      max_dll_clock_period_in_ns;
	const unsigned int      max_dll_delay_in_ns;
	unsigned long           clock_frequency_in_hz;

};
236
237/* Common Services */
238extern int common_nfc_set_geometry(struct gpmi_nand_data *);
239extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
240extern void prepare_data_dma(struct gpmi_nand_data *,
241 enum dma_data_direction dr);
242extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
243 struct dma_async_tx_descriptor *);
244extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
245 struct dma_async_tx_descriptor *);
246
247/* GPMI-NAND helper function library */
248extern int gpmi_init(struct gpmi_nand_data *);
249extern void gpmi_clear_bch(struct gpmi_nand_data *);
250extern void gpmi_dump_info(struct gpmi_nand_data *);
251extern int bch_set_geometry(struct gpmi_nand_data *);
252extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
253extern int gpmi_send_command(struct gpmi_nand_data *);
254extern void gpmi_begin(struct gpmi_nand_data *);
255extern void gpmi_end(struct gpmi_nand_data *);
256extern int gpmi_read_data(struct gpmi_nand_data *);
257extern int gpmi_send_data(struct gpmi_nand_data *);
258extern int gpmi_send_page(struct gpmi_nand_data *,
259 dma_addr_t payload, dma_addr_t auxiliary);
260extern int gpmi_read_page(struct gpmi_nand_data *,
261 dma_addr_t payload, dma_addr_t auxiliary);
262
263/* BCH : Status Block Completion Codes */
264#define STATUS_GOOD 0x00
265#define STATUS_ERASED 0xff
266#define STATUS_UNCORRECTABLE 0xfe
267
268/* Use the platform_id to distinguish different Archs. */
269#define IS_MX23 0x1
270#define IS_MX28 0x2
271#define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23)
272#define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28)
273#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
new file mode 100644
index 000000000000..83431240e2f2
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
@@ -0,0 +1,172 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 * Copyright 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21#ifndef __GPMI_NAND_GPMI_REGS_H
22#define __GPMI_NAND_GPMI_REGS_H
23
24#define HW_GPMI_CTRL0 0x00000000
25#define HW_GPMI_CTRL0_SET 0x00000004
26#define HW_GPMI_CTRL0_CLR 0x00000008
27#define HW_GPMI_CTRL0_TOG 0x0000000c
28
29#define BP_GPMI_CTRL0_COMMAND_MODE 24
30#define BM_GPMI_CTRL0_COMMAND_MODE (3 << BP_GPMI_CTRL0_COMMAND_MODE)
31#define BF_GPMI_CTRL0_COMMAND_MODE(v) \
32 (((v) << BP_GPMI_CTRL0_COMMAND_MODE) & BM_GPMI_CTRL0_COMMAND_MODE)
33#define BV_GPMI_CTRL0_COMMAND_MODE__WRITE 0x0
34#define BV_GPMI_CTRL0_COMMAND_MODE__READ 0x1
35#define BV_GPMI_CTRL0_COMMAND_MODE__READ_AND_COMPARE 0x2
36#define BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY 0x3
37
38#define BM_GPMI_CTRL0_WORD_LENGTH (1 << 23)
39#define BV_GPMI_CTRL0_WORD_LENGTH__16_BIT 0x0
40#define BV_GPMI_CTRL0_WORD_LENGTH__8_BIT 0x1
41
42/*
43 * Difference in LOCK_CS between imx23 and imx28 :
44 * This bit may impact the _POWER_ consumption. So some chips
45 * do not set it.
46 */
47#define MX23_BP_GPMI_CTRL0_LOCK_CS 22
48#define MX28_BP_GPMI_CTRL0_LOCK_CS 27
49#define LOCK_CS_ENABLE 0x1
50#define BF_GPMI_CTRL0_LOCK_CS(v, x) 0x0
51
52/* Difference in CS between imx23 and imx28 */
53#define BP_GPMI_CTRL0_CS 20
54#define MX23_BM_GPMI_CTRL0_CS (3 << BP_GPMI_CTRL0_CS)
55#define MX28_BM_GPMI_CTRL0_CS (7 << BP_GPMI_CTRL0_CS)
56#define BF_GPMI_CTRL0_CS(v, x) (((v) << BP_GPMI_CTRL0_CS) & \
57 (GPMI_IS_MX23((x)) \
58 ? MX23_BM_GPMI_CTRL0_CS \
59 : MX28_BM_GPMI_CTRL0_CS))
60
61#define BP_GPMI_CTRL0_ADDRESS 17
62#define BM_GPMI_CTRL0_ADDRESS (3 << BP_GPMI_CTRL0_ADDRESS)
63#define BF_GPMI_CTRL0_ADDRESS(v) \
64 (((v) << BP_GPMI_CTRL0_ADDRESS) & BM_GPMI_CTRL0_ADDRESS)
65#define BV_GPMI_CTRL0_ADDRESS__NAND_DATA 0x0
66#define BV_GPMI_CTRL0_ADDRESS__NAND_CLE 0x1
67#define BV_GPMI_CTRL0_ADDRESS__NAND_ALE 0x2
68
69#define BM_GPMI_CTRL0_ADDRESS_INCREMENT (1 << 16)
70#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__DISABLED 0x0
71#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__ENABLED 0x1
72
73#define BP_GPMI_CTRL0_XFER_COUNT 0
74#define BM_GPMI_CTRL0_XFER_COUNT (0xffff << BP_GPMI_CTRL0_XFER_COUNT)
75#define BF_GPMI_CTRL0_XFER_COUNT(v) \
76 (((v) << BP_GPMI_CTRL0_XFER_COUNT) & BM_GPMI_CTRL0_XFER_COUNT)
77
78#define HW_GPMI_COMPARE 0x00000010
79
80#define HW_GPMI_ECCCTRL 0x00000020
81#define HW_GPMI_ECCCTRL_SET 0x00000024
82#define HW_GPMI_ECCCTRL_CLR 0x00000028
83#define HW_GPMI_ECCCTRL_TOG 0x0000002c
84
85#define BP_GPMI_ECCCTRL_ECC_CMD 13
86#define BM_GPMI_ECCCTRL_ECC_CMD (3 << BP_GPMI_ECCCTRL_ECC_CMD)
87#define BF_GPMI_ECCCTRL_ECC_CMD(v) \
88 (((v) << BP_GPMI_ECCCTRL_ECC_CMD) & BM_GPMI_ECCCTRL_ECC_CMD)
89#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE 0x0
90#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE 0x1
91
92#define BM_GPMI_ECCCTRL_ENABLE_ECC (1 << 12)
93#define BV_GPMI_ECCCTRL_ENABLE_ECC__ENABLE 0x1
94#define BV_GPMI_ECCCTRL_ENABLE_ECC__DISABLE 0x0
95
96#define BP_GPMI_ECCCTRL_BUFFER_MASK 0
97#define BM_GPMI_ECCCTRL_BUFFER_MASK (0x1ff << BP_GPMI_ECCCTRL_BUFFER_MASK)
98#define BF_GPMI_ECCCTRL_BUFFER_MASK(v) \
99 (((v) << BP_GPMI_ECCCTRL_BUFFER_MASK) & BM_GPMI_ECCCTRL_BUFFER_MASK)
100#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY 0x100
101#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE 0x1FF
102
103#define HW_GPMI_ECCCOUNT 0x00000030
104#define HW_GPMI_PAYLOAD 0x00000040
105#define HW_GPMI_AUXILIARY 0x00000050
106#define HW_GPMI_CTRL1 0x00000060
107#define HW_GPMI_CTRL1_SET 0x00000064
108#define HW_GPMI_CTRL1_CLR 0x00000068
109#define HW_GPMI_CTRL1_TOG 0x0000006c
110
111#define BM_GPMI_CTRL1_BCH_MODE (1 << 18)
112
113#define BP_GPMI_CTRL1_DLL_ENABLE 17
114#define BM_GPMI_CTRL1_DLL_ENABLE (1 << BP_GPMI_CTRL1_DLL_ENABLE)
115
116#define BP_GPMI_CTRL1_HALF_PERIOD 16
117#define BM_GPMI_CTRL1_HALF_PERIOD (1 << BP_GPMI_CTRL1_HALF_PERIOD)
118
119#define BP_GPMI_CTRL1_RDN_DELAY 12
120#define BM_GPMI_CTRL1_RDN_DELAY (0xf << BP_GPMI_CTRL1_RDN_DELAY)
121#define BF_GPMI_CTRL1_RDN_DELAY(v) \
122 (((v) << BP_GPMI_CTRL1_RDN_DELAY) & BM_GPMI_CTRL1_RDN_DELAY)
123
124#define BM_GPMI_CTRL1_DEV_RESET (1 << 3)
125#define BV_GPMI_CTRL1_DEV_RESET__ENABLED 0x0
126#define BV_GPMI_CTRL1_DEV_RESET__DISABLED 0x1
127
128#define BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY (1 << 2)
129#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVELOW 0x0
130#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVEHIGH 0x1
131
132#define BM_GPMI_CTRL1_CAMERA_MODE (1 << 1)
133#define BV_GPMI_CTRL1_GPMI_MODE__NAND 0x0
134#define BV_GPMI_CTRL1_GPMI_MODE__ATA 0x1
135
136#define BM_GPMI_CTRL1_GPMI_MODE (1 << 0)
137
138#define HW_GPMI_TIMING0 0x00000070
139
140#define BP_GPMI_TIMING0_ADDRESS_SETUP 16
141#define BM_GPMI_TIMING0_ADDRESS_SETUP (0xff << BP_GPMI_TIMING0_ADDRESS_SETUP)
142#define BF_GPMI_TIMING0_ADDRESS_SETUP(v) \
143 (((v) << BP_GPMI_TIMING0_ADDRESS_SETUP) & BM_GPMI_TIMING0_ADDRESS_SETUP)
144
145#define BP_GPMI_TIMING0_DATA_HOLD 8
146#define BM_GPMI_TIMING0_DATA_HOLD (0xff << BP_GPMI_TIMING0_DATA_HOLD)
147#define BF_GPMI_TIMING0_DATA_HOLD(v) \
148 (((v) << BP_GPMI_TIMING0_DATA_HOLD) & BM_GPMI_TIMING0_DATA_HOLD)
149
150#define BP_GPMI_TIMING0_DATA_SETUP 0
151#define BM_GPMI_TIMING0_DATA_SETUP (0xff << BP_GPMI_TIMING0_DATA_SETUP)
152#define BF_GPMI_TIMING0_DATA_SETUP(v) \
153 (((v) << BP_GPMI_TIMING0_DATA_SETUP) & BM_GPMI_TIMING0_DATA_SETUP)
154
155#define HW_GPMI_TIMING1 0x00000080
156#define BP_GPMI_TIMING1_BUSY_TIMEOUT 16
157
158#define HW_GPMI_TIMING2 0x00000090
159#define HW_GPMI_DATA 0x000000a0
160
161/* MX28 uses this to detect READY. */
162#define HW_GPMI_STAT 0x000000b0
163#define MX28_BP_GPMI_STAT_READY_BUSY 24
164#define MX28_BM_GPMI_STAT_READY_BUSY (0xff << MX28_BP_GPMI_STAT_READY_BUSY)
165#define MX28_BF_GPMI_STAT_READY_BUSY(v) \
166 (((v) << MX28_BP_GPMI_STAT_READY_BUSY) & MX28_BM_GPMI_STAT_READY_BUSY)
167
168/* MX23 uses this to detect READY. */
169#define HW_GPMI_DEBUG 0x000000c0
170#define MX23_BP_GPMI_DEBUG_READY0 28
171#define MX23_BM_GPMI_DEBUG_READY0 (1 << MX23_BP_GPMI_DEBUG_READY0)
172#endif
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 02a03e67109c..5dc6f0d92f1a 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -81,9 +81,6 @@ static int h1910_device_ready(struct mtd_info *mtd)
81static int __init h1910_init(void) 81static int __init h1910_init(void)
82{ 82{
83 struct nand_chip *this; 83 struct nand_chip *this;
84 const char *part_type = 0;
85 int mtd_parts_nb = 0;
86 struct mtd_partition *mtd_parts = 0;
87 void __iomem *nandaddr; 84 void __iomem *nandaddr;
88 85
89 if (!machine_is_h1900()) 86 if (!machine_is_h1900())
@@ -136,22 +133,10 @@ static int __init h1910_init(void)
136 iounmap((void *)nandaddr); 133 iounmap((void *)nandaddr);
137 return -ENXIO; 134 return -ENXIO;
138 } 135 }
139#ifdef CONFIG_MTD_CMDLINE_PARTS
140 mtd_parts_nb = parse_cmdline_partitions(h1910_nand_mtd, &mtd_parts, "h1910-nand");
141 if (mtd_parts_nb > 0)
142 part_type = "command line";
143 else
144 mtd_parts_nb = 0;
145#endif
146 if (mtd_parts_nb == 0) {
147 mtd_parts = partition_info;
148 mtd_parts_nb = NUM_PARTITIONS;
149 part_type = "static";
150 }
151 136
152 /* Register the partitions */ 137 /* Register the partitions */
153 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 138 mtd_device_parse_register(h1910_nand_mtd, NULL, 0,
154 mtd_device_register(h1910_nand_mtd, mtd_parts, mtd_parts_nb); 139 partition_info, NUM_PARTITIONS);
155 140
156 /* Return happy */ 141 /* Return happy */
157 return 0; 142 return 0;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 6e813daed068..e2664073a89b 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -251,10 +251,6 @@ static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
251 return 0; 251 return 0;
252} 252}
253 253
254#ifdef CONFIG_MTD_CMDLINE_PARTS
255static const char *part_probes[] = {"cmdline", NULL};
256#endif
257
258static int jz_nand_ioremap_resource(struct platform_device *pdev, 254static int jz_nand_ioremap_resource(struct platform_device *pdev,
259 const char *name, struct resource **res, void __iomem **base) 255 const char *name, struct resource **res, void __iomem **base)
260{ 256{
@@ -299,8 +295,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
299 struct nand_chip *chip; 295 struct nand_chip *chip;
300 struct mtd_info *mtd; 296 struct mtd_info *mtd;
301 struct jz_nand_platform_data *pdata = pdev->dev.platform_data; 297 struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
302 struct mtd_partition *partition_info;
303 int num_partitions = 0;
304 298
305 nand = kzalloc(sizeof(*nand), GFP_KERNEL); 299 nand = kzalloc(sizeof(*nand), GFP_KERNEL);
306 if (!nand) { 300 if (!nand) {
@@ -373,15 +367,9 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
373 goto err_gpio_free; 367 goto err_gpio_free;
374 } 368 }
375 369
376#ifdef CONFIG_MTD_CMDLINE_PARTS 370 ret = mtd_device_parse_register(mtd, NULL, 0,
377 num_partitions = parse_mtd_partitions(mtd, part_probes, 371 pdata ? pdata->partitions : NULL,
378 &partition_info, 0); 372 pdata ? pdata->num_partitions : 0);
379#endif
380 if (num_partitions <= 0 && pdata) {
381 num_partitions = pdata->num_partitions;
382 partition_info = pdata->partitions;
383 }
384 ret = mtd_device_register(mtd, partition_info, num_partitions);
385 373
386 if (ret) { 374 if (ret) {
387 dev_err(&pdev->dev, "Failed to add mtd device\n"); 375 dev_err(&pdev->dev, "Failed to add mtd device\n");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index eb1fbac63eb6..5ede64706346 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -131,8 +131,6 @@ struct mpc5121_nfc_prv {
131 131
132static void mpc5121_nfc_done(struct mtd_info *mtd); 132static void mpc5121_nfc_done(struct mtd_info *mtd);
133 133
134static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
135
136/* Read NFC register */ 134/* Read NFC register */
137static inline u16 nfc_read(struct mtd_info *mtd, uint reg) 135static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
138{ 136{
@@ -656,13 +654,13 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
656 struct mpc5121_nfc_prv *prv; 654 struct mpc5121_nfc_prv *prv;
657 struct resource res; 655 struct resource res;
658 struct mtd_info *mtd; 656 struct mtd_info *mtd;
659 struct mtd_partition *parts;
660 struct nand_chip *chip; 657 struct nand_chip *chip;
661 unsigned long regs_paddr, regs_size; 658 unsigned long regs_paddr, regs_size;
662 const __be32 *chips_no; 659 const __be32 *chips_no;
663 int resettime = 0; 660 int resettime = 0;
664 int retval = 0; 661 int retval = 0;
665 int rev, len; 662 int rev, len;
663 struct mtd_part_parser_data ppdata;
666 664
667 /* 665 /*
668 * Check SoC revision. This driver supports only NFC 666 * Check SoC revision. This driver supports only NFC
@@ -727,6 +725,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
727 } 725 }
728 726
729 mtd->name = "MPC5121 NAND"; 727 mtd->name = "MPC5121 NAND";
728 ppdata.of_node = dn;
730 chip->dev_ready = mpc5121_nfc_dev_ready; 729 chip->dev_ready = mpc5121_nfc_dev_ready;
731 chip->cmdfunc = mpc5121_nfc_command; 730 chip->cmdfunc = mpc5121_nfc_command;
732 chip->read_byte = mpc5121_nfc_read_byte; 731 chip->read_byte = mpc5121_nfc_read_byte;
@@ -735,7 +734,8 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
735 chip->write_buf = mpc5121_nfc_write_buf; 734 chip->write_buf = mpc5121_nfc_write_buf;
736 chip->verify_buf = mpc5121_nfc_verify_buf; 735 chip->verify_buf = mpc5121_nfc_verify_buf;
737 chip->select_chip = mpc5121_nfc_select_chip; 736 chip->select_chip = mpc5121_nfc_select_chip;
738 chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT; 737 chip->options = NAND_NO_AUTOINCR;
738 chip->bbt_options = NAND_BBT_USE_FLASH;
739 chip->ecc.mode = NAND_ECC_SOFT; 739 chip->ecc.mode = NAND_ECC_SOFT;
740 740
741 /* Support external chip-select logic on ADS5121 board */ 741 /* Support external chip-select logic on ADS5121 board */
@@ -837,19 +837,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
837 dev_set_drvdata(dev, mtd); 837 dev_set_drvdata(dev, mtd);
838 838
839 /* Register device in MTD */ 839 /* Register device in MTD */
840 retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0); 840 retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
841#ifdef CONFIG_MTD_OF_PARTS
842 if (retval == 0)
843 retval = of_mtd_parse_partitions(dev, dn, &parts);
844#endif
845 if (retval < 0) {
846 dev_err(dev, "Error parsing MTD partitions!\n");
847 devm_free_irq(dev, prv->irq, mtd);
848 retval = -EINVAL;
849 goto error;
850 }
851
852 retval = mtd_device_register(mtd, parts, retval);
853 if (retval) { 841 if (retval) {
854 dev_err(dev, "Error adding MTD device!\n"); 842 dev_err(dev, "Error adding MTD device!\n");
855 devm_free_irq(dev, prv->irq, mtd); 843 devm_free_irq(dev, prv->irq, mtd);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 90df34c4d26c..74a43b818d0e 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -41,7 +41,7 @@
41 41
42#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35()) 42#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
43#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21()) 43#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
44#define nfc_is_v3_2() cpu_is_mx51() 44#define nfc_is_v3_2() (cpu_is_mx51() || cpu_is_mx53())
45#define nfc_is_v3() nfc_is_v3_2() 45#define nfc_is_v3() nfc_is_v3_2()
46 46
47/* Addresses for NFC registers */ 47/* Addresses for NFC registers */
@@ -143,7 +143,6 @@
143struct mxc_nand_host { 143struct mxc_nand_host {
144 struct mtd_info mtd; 144 struct mtd_info mtd;
145 struct nand_chip nand; 145 struct nand_chip nand;
146 struct mtd_partition *parts;
147 struct device *dev; 146 struct device *dev;
148 147
149 void *spare0; 148 void *spare0;
@@ -350,8 +349,7 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
350 udelay(1); 349 udelay(1);
351 } 350 }
352 if (max_retries < 0) 351 if (max_retries < 0)
353 DEBUG(MTD_DEBUG_LEVEL0, "%s: INT not set\n", 352 pr_debug("%s: INT not set\n", __func__);
354 __func__);
355 } 353 }
356} 354}
357 355
@@ -371,7 +369,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
371 * waits for completion. */ 369 * waits for completion. */
372static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) 370static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
373{ 371{
374 DEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x, %d)\n", cmd, useirq); 372 pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
375 373
376 writew(cmd, NFC_V1_V2_FLASH_CMD); 374 writew(cmd, NFC_V1_V2_FLASH_CMD);
377 writew(NFC_CMD, NFC_V1_V2_CONFIG2); 375 writew(NFC_CMD, NFC_V1_V2_CONFIG2);
@@ -387,8 +385,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
387 udelay(1); 385 udelay(1);
388 } 386 }
389 if (max_retries < 0) 387 if (max_retries < 0)
390 DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n", 388 pr_debug("%s: RESET failed\n", __func__);
391 __func__);
392 } else { 389 } else {
393 /* Wait for operation to complete */ 390 /* Wait for operation to complete */
394 wait_op_done(host, useirq); 391 wait_op_done(host, useirq);
@@ -411,7 +408,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
411 * a NAND command. */ 408 * a NAND command. */
412static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast) 409static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
413{ 410{
414 DEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x %d)\n", addr, islast); 411 pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
415 412
416 writew(addr, NFC_V1_V2_FLASH_ADDR); 413 writew(addr, NFC_V1_V2_FLASH_ADDR);
417 writew(NFC_ADDR, NFC_V1_V2_CONFIG2); 414 writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
@@ -561,8 +558,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
561 uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT); 558 uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT);
562 559
563 if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { 560 if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
564 DEBUG(MTD_DEBUG_LEVEL0, 561 pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
565 "MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
566 return -1; 562 return -1;
567 } 563 }
568 564
@@ -849,7 +845,7 @@ static void preset_v1_v2(struct mtd_info *mtd)
849 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3); 845 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
850 } else if (nfc_is_v1()) { 846 } else if (nfc_is_v1()) {
851 writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR); 847 writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
852 writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR); 848 writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
853 } else 849 } else
854 BUG(); 850 BUG();
855 851
@@ -932,8 +928,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
932 struct nand_chip *nand_chip = mtd->priv; 928 struct nand_chip *nand_chip = mtd->priv;
933 struct mxc_nand_host *host = nand_chip->priv; 929 struct mxc_nand_host *host = nand_chip->priv;
934 930
935 DEBUG(MTD_DEBUG_LEVEL3, 931 pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
936 "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
937 command, column, page_addr); 932 command, column, page_addr);
938 933
939 /* Reset command state information */ 934 /* Reset command state information */
@@ -1044,7 +1039,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1044 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data; 1039 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
1045 struct mxc_nand_host *host; 1040 struct mxc_nand_host *host;
1046 struct resource *res; 1041 struct resource *res;
1047 int err = 0, __maybe_unused nr_parts = 0; 1042 int err = 0;
1048 struct nand_ecclayout *oob_smallpage, *oob_largepage; 1043 struct nand_ecclayout *oob_smallpage, *oob_largepage;
1049 1044
1050 /* Allocate memory for MTD device structure and private data */ 1045 /* Allocate memory for MTD device structure and private data */
@@ -1179,7 +1174,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1179 this->bbt_td = &bbt_main_descr; 1174 this->bbt_td = &bbt_main_descr;
1180 this->bbt_md = &bbt_mirror_descr; 1175 this->bbt_md = &bbt_mirror_descr;
1181 /* update flash based bbt */ 1176 /* update flash based bbt */
1182 this->options |= NAND_USE_FLASH_BBT; 1177 this->bbt_options |= NAND_BBT_USE_FLASH;
1183 } 1178 }
1184 1179
1185 init_completion(&host->op_completion); 1180 init_completion(&host->op_completion);
@@ -1231,16 +1226,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1231 } 1226 }
1232 1227
1233 /* Register the partitions */ 1228 /* Register the partitions */
1234 nr_parts = 1229 mtd_device_parse_register(mtd, part_probes, 0,
1235 parse_mtd_partitions(mtd, part_probes, &host->parts, 0); 1230 pdata->parts, pdata->nr_parts);
1236 if (nr_parts > 0)
1237 mtd_device_register(mtd, host->parts, nr_parts);
1238 else if (pdata->parts)
1239 mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
1240 else {
1241 pr_info("Registering %s as whole device\n", mtd->name);
1242 mtd_device_register(mtd, NULL, 0);
1243 }
1244 1231
1245 platform_set_drvdata(pdev, host); 1232 platform_set_drvdata(pdev, host);
1246 1233
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a46e9bb847bd..3ed9c5e4d34e 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -21,7 +21,7 @@
21 * TODO: 21 * TODO:
22 * Enable cached programming for 2k page size chips 22 * Enable cached programming for 2k page size chips
23 * Check, if mtd->ecctype should be set to MTD_ECC_HW 23 * Check, if mtd->ecctype should be set to MTD_ECC_HW
24 * if we have HW ecc support. 24 * if we have HW ECC support.
25 * The AG-AND chips have nice features for speed improvement, 25 * The AG-AND chips have nice features for speed improvement,
26 * which are not supported yet. Read / program 4 pages in one go. 26 * which are not supported yet. Read / program 4 pages in one go.
27 * BBT table is not serialized, has to be fixed 27 * BBT table is not serialized, has to be fixed
@@ -113,21 +113,19 @@ static int check_offs_len(struct mtd_info *mtd,
113 113
114 /* Start address must align on block boundary */ 114 /* Start address must align on block boundary */
115 if (ofs & ((1 << chip->phys_erase_shift) - 1)) { 115 if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
116 DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__); 116 pr_debug("%s: unaligned address\n", __func__);
117 ret = -EINVAL; 117 ret = -EINVAL;
118 } 118 }
119 119
120 /* Length must align on block boundary */ 120 /* Length must align on block boundary */
121 if (len & ((1 << chip->phys_erase_shift) - 1)) { 121 if (len & ((1 << chip->phys_erase_shift) - 1)) {
122 DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n", 122 pr_debug("%s: length not block aligned\n", __func__);
123 __func__);
124 ret = -EINVAL; 123 ret = -EINVAL;
125 } 124 }
126 125
127 /* Do not allow past end of device */ 126 /* Do not allow past end of device */
128 if (ofs + len > mtd->size) { 127 if (ofs + len > mtd->size) {
129 DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n", 128 pr_debug("%s: past end of device\n", __func__);
130 __func__);
131 ret = -EINVAL; 129 ret = -EINVAL;
132 } 130 }
133 131
@@ -136,9 +134,9 @@ static int check_offs_len(struct mtd_info *mtd,
136 134
137/** 135/**
138 * nand_release_device - [GENERIC] release chip 136 * nand_release_device - [GENERIC] release chip
139 * @mtd: MTD device structure 137 * @mtd: MTD device structure
140 * 138 *
141 * Deselect, release chip lock and wake up anyone waiting on the device 139 * Deselect, release chip lock and wake up anyone waiting on the device.
142 */ 140 */
143static void nand_release_device(struct mtd_info *mtd) 141static void nand_release_device(struct mtd_info *mtd)
144{ 142{
@@ -157,9 +155,9 @@ static void nand_release_device(struct mtd_info *mtd)
157 155
158/** 156/**
159 * nand_read_byte - [DEFAULT] read one byte from the chip 157 * nand_read_byte - [DEFAULT] read one byte from the chip
160 * @mtd: MTD device structure 158 * @mtd: MTD device structure
161 * 159 *
162 * Default read function for 8bit buswith 160 * Default read function for 8bit buswidth
163 */ 161 */
164static uint8_t nand_read_byte(struct mtd_info *mtd) 162static uint8_t nand_read_byte(struct mtd_info *mtd)
165{ 163{
@@ -169,10 +167,11 @@ static uint8_t nand_read_byte(struct mtd_info *mtd)
169 167
170/** 168/**
171 * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip 169 * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip
172 * @mtd: MTD device structure 170 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
171 * @mtd: MTD device structure
172 *
173 * Default read function for 16bit buswidth with endianness conversion.
173 * 174 *
174 * Default read function for 16bit buswith with
175 * endianess conversion
176 */ 175 */
177static uint8_t nand_read_byte16(struct mtd_info *mtd) 176static uint8_t nand_read_byte16(struct mtd_info *mtd)
178{ 177{
@@ -182,10 +181,9 @@ static uint8_t nand_read_byte16(struct mtd_info *mtd)
182 181
183/** 182/**
184 * nand_read_word - [DEFAULT] read one word from the chip 183 * nand_read_word - [DEFAULT] read one word from the chip
185 * @mtd: MTD device structure 184 * @mtd: MTD device structure
186 * 185 *
187 * Default read function for 16bit buswith without 186 * Default read function for 16bit buswidth without endianness conversion.
188 * endianess conversion
189 */ 187 */
190static u16 nand_read_word(struct mtd_info *mtd) 188static u16 nand_read_word(struct mtd_info *mtd)
191{ 189{
@@ -195,8 +193,8 @@ static u16 nand_read_word(struct mtd_info *mtd)
195 193
196/** 194/**
197 * nand_select_chip - [DEFAULT] control CE line 195 * nand_select_chip - [DEFAULT] control CE line
198 * @mtd: MTD device structure 196 * @mtd: MTD device structure
199 * @chipnr: chipnumber to select, -1 for deselect 197 * @chipnr: chipnumber to select, -1 for deselect
200 * 198 *
201 * Default select function for 1 chip devices. 199 * Default select function for 1 chip devices.
202 */ 200 */
@@ -218,11 +216,11 @@ static void nand_select_chip(struct mtd_info *mtd, int chipnr)
218 216
219/** 217/**
220 * nand_write_buf - [DEFAULT] write buffer to chip 218 * nand_write_buf - [DEFAULT] write buffer to chip
221 * @mtd: MTD device structure 219 * @mtd: MTD device structure
222 * @buf: data buffer 220 * @buf: data buffer
223 * @len: number of bytes to write 221 * @len: number of bytes to write
224 * 222 *
225 * Default write function for 8bit buswith 223 * Default write function for 8bit buswidth.
226 */ 224 */
227static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 225static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
228{ 226{
@@ -235,11 +233,11 @@ static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
235 233
236/** 234/**
237 * nand_read_buf - [DEFAULT] read chip data into buffer 235 * nand_read_buf - [DEFAULT] read chip data into buffer
238 * @mtd: MTD device structure 236 * @mtd: MTD device structure
239 * @buf: buffer to store date 237 * @buf: buffer to store date
240 * @len: number of bytes to read 238 * @len: number of bytes to read
241 * 239 *
242 * Default read function for 8bit buswith 240 * Default read function for 8bit buswidth.
243 */ 241 */
244static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 242static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
245{ 243{
@@ -252,11 +250,11 @@ static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
252 250
253/** 251/**
254 * nand_verify_buf - [DEFAULT] Verify chip data against buffer 252 * nand_verify_buf - [DEFAULT] Verify chip data against buffer
255 * @mtd: MTD device structure 253 * @mtd: MTD device structure
256 * @buf: buffer containing the data to compare 254 * @buf: buffer containing the data to compare
257 * @len: number of bytes to compare 255 * @len: number of bytes to compare
258 * 256 *
259 * Default verify function for 8bit buswith 257 * Default verify function for 8bit buswidth.
260 */ 258 */
261static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 259static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
262{ 260{
@@ -271,11 +269,11 @@ static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
271 269
272/** 270/**
273 * nand_write_buf16 - [DEFAULT] write buffer to chip 271 * nand_write_buf16 - [DEFAULT] write buffer to chip
274 * @mtd: MTD device structure 272 * @mtd: MTD device structure
275 * @buf: data buffer 273 * @buf: data buffer
276 * @len: number of bytes to write 274 * @len: number of bytes to write
277 * 275 *
278 * Default write function for 16bit buswith 276 * Default write function for 16bit buswidth.
279 */ 277 */
280static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) 278static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
281{ 279{
@@ -291,11 +289,11 @@ static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
291 289
292/** 290/**
293 * nand_read_buf16 - [DEFAULT] read chip data into buffer 291 * nand_read_buf16 - [DEFAULT] read chip data into buffer
294 * @mtd: MTD device structure 292 * @mtd: MTD device structure
295 * @buf: buffer to store date 293 * @buf: buffer to store date
296 * @len: number of bytes to read 294 * @len: number of bytes to read
297 * 295 *
298 * Default read function for 16bit buswith 296 * Default read function for 16bit buswidth.
299 */ 297 */
300static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) 298static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
301{ 299{
@@ -310,11 +308,11 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
310 308
311/** 309/**
312 * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer 310 * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
313 * @mtd: MTD device structure 311 * @mtd: MTD device structure
314 * @buf: buffer containing the data to compare 312 * @buf: buffer containing the data to compare
315 * @len: number of bytes to compare 313 * @len: number of bytes to compare
316 * 314 *
317 * Default verify function for 16bit buswith 315 * Default verify function for 16bit buswidth.
318 */ 316 */
319static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) 317static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
320{ 318{
@@ -332,9 +330,9 @@ static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
332 330
333/** 331/**
334 * nand_block_bad - [DEFAULT] Read bad block marker from the chip 332 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
335 * @mtd: MTD device structure 333 * @mtd: MTD device structure
336 * @ofs: offset from device start 334 * @ofs: offset from device start
337 * @getchip: 0, if the chip is already selected 335 * @getchip: 0, if the chip is already selected
338 * 336 *
339 * Check, if the block is bad. 337 * Check, if the block is bad.
340 */ 338 */
@@ -344,7 +342,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
344 struct nand_chip *chip = mtd->priv; 342 struct nand_chip *chip = mtd->priv;
345 u16 bad; 343 u16 bad;
346 344
347 if (chip->options & NAND_BBT_SCANLASTPAGE) 345 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
348 ofs += mtd->erasesize - mtd->writesize; 346 ofs += mtd->erasesize - mtd->writesize;
349 347
350 page = (int)(ofs >> chip->page_shift) & chip->pagemask; 348 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
@@ -384,11 +382,11 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
384 382
385/** 383/**
386 * nand_default_block_markbad - [DEFAULT] mark a block bad 384 * nand_default_block_markbad - [DEFAULT] mark a block bad
387 * @mtd: MTD device structure 385 * @mtd: MTD device structure
388 * @ofs: offset from device start 386 * @ofs: offset from device start
389 * 387 *
390 * This is the default implementation, which can be overridden by 388 * This is the default implementation, which can be overridden by a hardware
391 * a hardware specific driver. 389 * specific driver.
392*/ 390*/
393static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) 391static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
394{ 392{
@@ -396,7 +394,7 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
396 uint8_t buf[2] = { 0, 0 }; 394 uint8_t buf[2] = { 0, 0 };
397 int block, ret, i = 0; 395 int block, ret, i = 0;
398 396
399 if (chip->options & NAND_BBT_SCANLASTPAGE) 397 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
400 ofs += mtd->erasesize - mtd->writesize; 398 ofs += mtd->erasesize - mtd->writesize;
401 399
402 /* Get block number */ 400 /* Get block number */
@@ -404,33 +402,31 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
404 if (chip->bbt) 402 if (chip->bbt)
405 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); 403 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
406 404
407 /* Do we have a flash based bad block table ? */ 405 /* Do we have a flash based bad block table? */
408 if (chip->options & NAND_USE_FLASH_BBT) 406 if (chip->bbt_options & NAND_BBT_USE_FLASH)
409 ret = nand_update_bbt(mtd, ofs); 407 ret = nand_update_bbt(mtd, ofs);
410 else { 408 else {
409 struct mtd_oob_ops ops;
410
411 nand_get_device(chip, mtd, FL_WRITING); 411 nand_get_device(chip, mtd, FL_WRITING);
412 412
413 /* Write to first two pages and to byte 1 and 6 if necessary. 413 /*
414 * If we write to more than one location, the first error 414 * Write to first two pages if necessary. If we write to more
415 * encountered quits the procedure. We write two bytes per 415 * than one location, the first error encountered quits the
416 * location, so we dont have to mess with 16 bit access. 416 * procedure. We write two bytes per location, so we dont have
417 * to mess with 16 bit access.
417 */ 418 */
419 ops.len = ops.ooblen = 2;
420 ops.datbuf = NULL;
421 ops.oobbuf = buf;
422 ops.ooboffs = chip->badblockpos & ~0x01;
423 ops.mode = MTD_OPS_PLACE_OOB;
418 do { 424 do {
419 chip->ops.len = chip->ops.ooblen = 2; 425 ret = nand_do_write_oob(mtd, ofs, &ops);
420 chip->ops.datbuf = NULL;
421 chip->ops.oobbuf = buf;
422 chip->ops.ooboffs = chip->badblockpos & ~0x01;
423
424 ret = nand_do_write_oob(mtd, ofs, &chip->ops);
425 426
426 if (!ret && (chip->options & NAND_BBT_SCANBYTE1AND6)) {
427 chip->ops.ooboffs = NAND_SMALL_BADBLOCK_POS
428 & ~0x01;
429 ret = nand_do_write_oob(mtd, ofs, &chip->ops);
430 }
431 i++; 427 i++;
432 ofs += mtd->writesize; 428 ofs += mtd->writesize;
433 } while (!ret && (chip->options & NAND_BBT_SCAN2NDPAGE) && 429 } while (!ret && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE) &&
434 i < 2); 430 i < 2);
435 431
436 nand_release_device(mtd); 432 nand_release_device(mtd);
@@ -443,16 +439,16 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
443 439
444/** 440/**
445 * nand_check_wp - [GENERIC] check if the chip is write protected 441 * nand_check_wp - [GENERIC] check if the chip is write protected
446 * @mtd: MTD device structure 442 * @mtd: MTD device structure
447 * Check, if the device is write protected
448 * 443 *
449 * The function expects, that the device is already selected 444 * Check, if the device is write protected. The function expects, that the
445 * device is already selected.
450 */ 446 */
451static int nand_check_wp(struct mtd_info *mtd) 447static int nand_check_wp(struct mtd_info *mtd)
452{ 448{
453 struct nand_chip *chip = mtd->priv; 449 struct nand_chip *chip = mtd->priv;
454 450
455 /* broken xD cards report WP despite being writable */ 451 /* Broken xD cards report WP despite being writable */
456 if (chip->options & NAND_BROKEN_XD) 452 if (chip->options & NAND_BROKEN_XD)
457 return 0; 453 return 0;
458 454
@@ -463,10 +459,10 @@ static int nand_check_wp(struct mtd_info *mtd)
463 459
464/** 460/**
465 * nand_block_checkbad - [GENERIC] Check if a block is marked bad 461 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
466 * @mtd: MTD device structure 462 * @mtd: MTD device structure
467 * @ofs: offset from device start 463 * @ofs: offset from device start
468 * @getchip: 0, if the chip is already selected 464 * @getchip: 0, if the chip is already selected
469 * @allowbbt: 1, if its allowed to access the bbt area 465 * @allowbbt: 1, if its allowed to access the bbt area
470 * 466 *
471 * Check, if the block is bad. Either by reading the bad block table or 467 * Check, if the block is bad. Either by reading the bad block table or
472 * calling of the scan function. 468 * calling of the scan function.
@@ -485,8 +481,8 @@ static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
485 481
486/** 482/**
487 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands. 483 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
488 * @mtd: MTD device structure 484 * @mtd: MTD device structure
489 * @timeo: Timeout 485 * @timeo: Timeout
490 * 486 *
491 * Helper function for nand_wait_ready used when needing to wait in interrupt 487 * Helper function for nand_wait_ready used when needing to wait in interrupt
492 * context. 488 * context.
@@ -505,10 +501,7 @@ static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
505 } 501 }
506} 502}
507 503
508/* 504/* Wait for the ready pin, after a command. The timeout is caught later. */
509 * Wait for the ready pin, after a command
510 * The timeout is catched later.
511 */
512void nand_wait_ready(struct mtd_info *mtd) 505void nand_wait_ready(struct mtd_info *mtd)
513{ 506{
514 struct nand_chip *chip = mtd->priv; 507 struct nand_chip *chip = mtd->priv;
@@ -519,7 +512,7 @@ void nand_wait_ready(struct mtd_info *mtd)
519 return panic_nand_wait_ready(mtd, 400); 512 return panic_nand_wait_ready(mtd, 400);
520 513
521 led_trigger_event(nand_led_trigger, LED_FULL); 514 led_trigger_event(nand_led_trigger, LED_FULL);
522 /* wait until command is processed or timeout occures */ 515 /* Wait until command is processed or timeout occurs */
523 do { 516 do {
524 if (chip->dev_ready(mtd)) 517 if (chip->dev_ready(mtd))
525 break; 518 break;
@@ -531,13 +524,13 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
531 524
532/** 525/**
533 * nand_command - [DEFAULT] Send command to NAND device 526 * nand_command - [DEFAULT] Send command to NAND device
534 * @mtd: MTD device structure 527 * @mtd: MTD device structure
535 * @command: the command to be sent 528 * @command: the command to be sent
536 * @column: the column address for this command, -1 if none 529 * @column: the column address for this command, -1 if none
537 * @page_addr: the page address for this command, -1 if none 530 * @page_addr: the page address for this command, -1 if none
538 * 531 *
539 * Send command to NAND device. This function is used for small page 532 * Send command to NAND device. This function is used for small page devices
540 * devices (256/512 Bytes per page) 533 * (256/512 Bytes per page).
541 */ 534 */
542static void nand_command(struct mtd_info *mtd, unsigned int command, 535static void nand_command(struct mtd_info *mtd, unsigned int command,
543 int column, int page_addr) 536 int column, int page_addr)
@@ -545,9 +538,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
545 register struct nand_chip *chip = mtd->priv; 538 register struct nand_chip *chip = mtd->priv;
546 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE; 539 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
547 540
548 /* 541 /* Write out the command to the device */
549 * Write out the command to the device.
550 */
551 if (command == NAND_CMD_SEQIN) { 542 if (command == NAND_CMD_SEQIN) {
552 int readcmd; 543 int readcmd;
553 544
@@ -567,9 +558,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
567 } 558 }
568 chip->cmd_ctrl(mtd, command, ctrl); 559 chip->cmd_ctrl(mtd, command, ctrl);
569 560
570 /* 561 /* Address cycle, when necessary */
571 * Address cycle, when necessary
572 */
573 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE; 562 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
574 /* Serially input address */ 563 /* Serially input address */
575 if (column != -1) { 564 if (column != -1) {
@@ -590,8 +579,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
590 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); 579 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
591 580
592 /* 581 /*
593 * program and erase have their own busy handlers 582 * Program and erase have their own busy handlers status and sequential
594 * status and sequential in needs no delay 583 * in needs no delay
595 */ 584 */
596 switch (command) { 585 switch (command) {
597 586
@@ -625,8 +614,10 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
625 return; 614 return;
626 } 615 }
627 } 616 }
628 /* Apply this short delay always to ensure that we do wait tWB in 617 /*
629 * any case on any machine. */ 618 * Apply this short delay always to ensure that we do wait tWB in
619 * any case on any machine.
620 */
630 ndelay(100); 621 ndelay(100);
631 622
632 nand_wait_ready(mtd); 623 nand_wait_ready(mtd);
@@ -634,14 +625,14 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
634 625
635/** 626/**
636 * nand_command_lp - [DEFAULT] Send command to NAND large page device 627 * nand_command_lp - [DEFAULT] Send command to NAND large page device
637 * @mtd: MTD device structure 628 * @mtd: MTD device structure
638 * @command: the command to be sent 629 * @command: the command to be sent
639 * @column: the column address for this command, -1 if none 630 * @column: the column address for this command, -1 if none
640 * @page_addr: the page address for this command, -1 if none 631 * @page_addr: the page address for this command, -1 if none
641 * 632 *
642 * Send command to NAND device. This is the version for the new large page 633 * Send command to NAND device. This is the version for the new large page
643 * devices We dont have the separate regions as we have in the small page 634 * devices. We don't have the separate regions as we have in the small page
644 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible. 635 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
645 */ 636 */
646static void nand_command_lp(struct mtd_info *mtd, unsigned int command, 637static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
647 int column, int page_addr) 638 int column, int page_addr)
@@ -683,8 +674,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
683 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); 674 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
684 675
685 /* 676 /*
686 * program and erase have their own busy handlers 677 * Program and erase have their own busy handlers status, sequential
687 * status, sequential in, and deplete1 need no delay 678 * in, and deplete1 need no delay.
688 */ 679 */
689 switch (command) { 680 switch (command) {
690 681
@@ -698,14 +689,12 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
698 case NAND_CMD_DEPLETE1: 689 case NAND_CMD_DEPLETE1:
699 return; 690 return;
700 691
701 /*
702 * read error status commands require only a short delay
703 */
704 case NAND_CMD_STATUS_ERROR: 692 case NAND_CMD_STATUS_ERROR:
705 case NAND_CMD_STATUS_ERROR0: 693 case NAND_CMD_STATUS_ERROR0:
706 case NAND_CMD_STATUS_ERROR1: 694 case NAND_CMD_STATUS_ERROR1:
707 case NAND_CMD_STATUS_ERROR2: 695 case NAND_CMD_STATUS_ERROR2:
708 case NAND_CMD_STATUS_ERROR3: 696 case NAND_CMD_STATUS_ERROR3:
697 /* Read error status commands require only a short delay */
709 udelay(chip->chip_delay); 698 udelay(chip->chip_delay);
710 return; 699 return;
711 700
@@ -739,7 +728,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
739 default: 728 default:
740 /* 729 /*
741 * If we don't have access to the busy pin, we apply the given 730 * If we don't have access to the busy pin, we apply the given
742 * command delay 731 * command delay.
743 */ 732 */
744 if (!chip->dev_ready) { 733 if (!chip->dev_ready) {
745 udelay(chip->chip_delay); 734 udelay(chip->chip_delay);
@@ -747,8 +736,10 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
747 } 736 }
748 } 737 }
749 738
750 /* Apply this short delay always to ensure that we do wait tWB in 739 /*
751 * any case on any machine. */ 740 * Apply this short delay always to ensure that we do wait tWB in
741 * any case on any machine.
742 */
752 ndelay(100); 743 ndelay(100);
753 744
754 nand_wait_ready(mtd); 745 nand_wait_ready(mtd);
@@ -756,25 +747,25 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
756 747
757/** 748/**
758 * panic_nand_get_device - [GENERIC] Get chip for selected access 749 * panic_nand_get_device - [GENERIC] Get chip for selected access
759 * @chip: the nand chip descriptor 750 * @chip: the nand chip descriptor
760 * @mtd: MTD device structure 751 * @mtd: MTD device structure
761 * @new_state: the state which is requested 752 * @new_state: the state which is requested
762 * 753 *
763 * Used when in panic, no locks are taken. 754 * Used when in panic, no locks are taken.
764 */ 755 */
765static void panic_nand_get_device(struct nand_chip *chip, 756static void panic_nand_get_device(struct nand_chip *chip,
766 struct mtd_info *mtd, int new_state) 757 struct mtd_info *mtd, int new_state)
767{ 758{
768 /* Hardware controller shared among independend devices */ 759 /* Hardware controller shared among independent devices */
769 chip->controller->active = chip; 760 chip->controller->active = chip;
770 chip->state = new_state; 761 chip->state = new_state;
771} 762}
772 763
773/** 764/**
774 * nand_get_device - [GENERIC] Get chip for selected access 765 * nand_get_device - [GENERIC] Get chip for selected access
775 * @chip: the nand chip descriptor 766 * @chip: the nand chip descriptor
776 * @mtd: MTD device structure 767 * @mtd: MTD device structure
777 * @new_state: the state which is requested 768 * @new_state: the state which is requested
778 * 769 *
779 * Get the device and lock it for exclusive access 770 * Get the device and lock it for exclusive access
780 */ 771 */
@@ -812,10 +803,10 @@ retry:
812} 803}
813 804
814/** 805/**
815 * panic_nand_wait - [GENERIC] wait until the command is done 806 * panic_nand_wait - [GENERIC] wait until the command is done
816 * @mtd: MTD device structure 807 * @mtd: MTD device structure
817 * @chip: NAND chip structure 808 * @chip: NAND chip structure
818 * @timeo: Timeout 809 * @timeo: timeout
819 * 810 *
820 * Wait for command done. This is a helper function for nand_wait used when 811 * Wait for command done. This is a helper function for nand_wait used when
821 * we are in interrupt context. May happen when in panic and trying to write 812 * we are in interrupt context. May happen when in panic and trying to write
@@ -838,13 +829,13 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
838} 829}
839 830
840/** 831/**
841 * nand_wait - [DEFAULT] wait until the command is done 832 * nand_wait - [DEFAULT] wait until the command is done
842 * @mtd: MTD device structure 833 * @mtd: MTD device structure
843 * @chip: NAND chip structure 834 * @chip: NAND chip structure
844 * 835 *
845 * Wait for command done. This applies to erase and program only 836 * Wait for command done. This applies to erase and program only. Erase can
846 * Erase can take up to 400ms and program up to 20ms according to 837 * take up to 400ms and program up to 20ms according to general NAND and
847 * general NAND and SmartMedia specs 838 * SmartMedia specs.
848 */ 839 */
849static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) 840static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
850{ 841{
@@ -859,8 +850,10 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
859 850
860 led_trigger_event(nand_led_trigger, LED_FULL); 851 led_trigger_event(nand_led_trigger, LED_FULL);
861 852
862 /* Apply this short delay always to ensure that we do wait tWB in 853 /*
863 * any case on any machine. */ 854 * Apply this short delay always to ensure that we do wait tWB in any
855 * case on any machine.
856 */
864 ndelay(100); 857 ndelay(100);
865 858
866 if ((state == FL_ERASING) && (chip->options & NAND_IS_AND)) 859 if ((state == FL_ERASING) && (chip->options & NAND_IS_AND))
@@ -890,16 +883,15 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
890 883
891/** 884/**
892 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks 885 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
893 *
894 * @mtd: mtd info 886 * @mtd: mtd info
895 * @ofs: offset to start unlock from 887 * @ofs: offset to start unlock from
896 * @len: length to unlock 888 * @len: length to unlock
897 * @invert: when = 0, unlock the range of blocks within the lower and 889 * @invert: when = 0, unlock the range of blocks within the lower and
898 * upper boundary address 890 * upper boundary address
899 * when = 1, unlock the range of blocks outside the boundaries 891 * when = 1, unlock the range of blocks outside the boundaries
900 * of the lower and upper boundary address 892 * of the lower and upper boundary address
901 * 893 *
902 * return - unlock status 894 * Returs unlock status.
903 */ 895 */
904static int __nand_unlock(struct mtd_info *mtd, loff_t ofs, 896static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
905 uint64_t len, int invert) 897 uint64_t len, int invert)
@@ -919,10 +911,9 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
919 911
920 /* Call wait ready function */ 912 /* Call wait ready function */
921 status = chip->waitfunc(mtd, chip); 913 status = chip->waitfunc(mtd, chip);
922 udelay(1000);
923 /* See if device thinks it succeeded */ 914 /* See if device thinks it succeeded */
924 if (status & 0x01) { 915 if (status & 0x01) {
925 DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n", 916 pr_debug("%s: error status = 0x%08x\n",
926 __func__, status); 917 __func__, status);
927 ret = -EIO; 918 ret = -EIO;
928 } 919 }
@@ -932,12 +923,11 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
932 923
933/** 924/**
934 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks 925 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
935 *
936 * @mtd: mtd info 926 * @mtd: mtd info
937 * @ofs: offset to start unlock from 927 * @ofs: offset to start unlock from
938 * @len: length to unlock 928 * @len: length to unlock
939 * 929 *
940 * return - unlock status 930 * Returns unlock status.
941 */ 931 */
942int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 932int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
943{ 933{
@@ -945,7 +935,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
945 int chipnr; 935 int chipnr;
946 struct nand_chip *chip = mtd->priv; 936 struct nand_chip *chip = mtd->priv;
947 937
948 DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", 938 pr_debug("%s: start = 0x%012llx, len = %llu\n",
949 __func__, (unsigned long long)ofs, len); 939 __func__, (unsigned long long)ofs, len);
950 940
951 if (check_offs_len(mtd, ofs, len)) 941 if (check_offs_len(mtd, ofs, len))
@@ -964,7 +954,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
964 954
965 /* Check, if it is write protected */ 955 /* Check, if it is write protected */
966 if (nand_check_wp(mtd)) { 956 if (nand_check_wp(mtd)) {
967 DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", 957 pr_debug("%s: device is write protected!\n",
968 __func__); 958 __func__);
969 ret = -EIO; 959 ret = -EIO;
970 goto out; 960 goto out;
@@ -981,18 +971,16 @@ EXPORT_SYMBOL(nand_unlock);
981 971
982/** 972/**
983 * nand_lock - [REPLACEABLE] locks all blocks present in the device 973 * nand_lock - [REPLACEABLE] locks all blocks present in the device
984 *
985 * @mtd: mtd info 974 * @mtd: mtd info
986 * @ofs: offset to start unlock from 975 * @ofs: offset to start unlock from
987 * @len: length to unlock 976 * @len: length to unlock
988 * 977 *
989 * return - lock status 978 * This feature is not supported in many NAND parts. 'Micron' NAND parts do
979 * have this feature, but it allows only to lock all blocks, not for specified
980 * range for block. Implementing 'lock' feature by making use of 'unlock', for
981 * now.
990 * 982 *
991 * This feature is not supported in many NAND parts. 'Micron' NAND parts 983 * Returns lock status.
992 * do have this feature, but it allows only to lock all blocks, not for
993 * specified range for block.
994 *
995 * Implementing 'lock' feature by making use of 'unlock', for now.
996 */ 984 */
997int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 985int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
998{ 986{
@@ -1000,7 +988,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1000 int chipnr, status, page; 988 int chipnr, status, page;
1001 struct nand_chip *chip = mtd->priv; 989 struct nand_chip *chip = mtd->priv;
1002 990
1003 DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", 991 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1004 __func__, (unsigned long long)ofs, len); 992 __func__, (unsigned long long)ofs, len);
1005 993
1006 if (check_offs_len(mtd, ofs, len)) 994 if (check_offs_len(mtd, ofs, len))
@@ -1015,7 +1003,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1015 1003
1016 /* Check, if it is write protected */ 1004 /* Check, if it is write protected */
1017 if (nand_check_wp(mtd)) { 1005 if (nand_check_wp(mtd)) {
1018 DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", 1006 pr_debug("%s: device is write protected!\n",
1019 __func__); 1007 __func__);
1020 status = MTD_ERASE_FAILED; 1008 status = MTD_ERASE_FAILED;
1021 ret = -EIO; 1009 ret = -EIO;
@@ -1028,10 +1016,9 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1028 1016
1029 /* Call wait ready function */ 1017 /* Call wait ready function */
1030 status = chip->waitfunc(mtd, chip); 1018 status = chip->waitfunc(mtd, chip);
1031 udelay(1000);
1032 /* See if device thinks it succeeded */ 1019 /* See if device thinks it succeeded */
1033 if (status & 0x01) { 1020 if (status & 0x01) {
1034 DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n", 1021 pr_debug("%s: error status = 0x%08x\n",
1035 __func__, status); 1022 __func__, status);
1036 ret = -EIO; 1023 ret = -EIO;
1037 goto out; 1024 goto out;
@@ -1047,13 +1034,13 @@ out:
1047EXPORT_SYMBOL(nand_lock); 1034EXPORT_SYMBOL(nand_lock);
1048 1035
1049/** 1036/**
1050 * nand_read_page_raw - [Intern] read raw page data without ecc 1037 * nand_read_page_raw - [INTERN] read raw page data without ecc
1051 * @mtd: mtd info structure 1038 * @mtd: mtd info structure
1052 * @chip: nand chip info structure 1039 * @chip: nand chip info structure
1053 * @buf: buffer to store read data 1040 * @buf: buffer to store read data
1054 * @page: page number to read 1041 * @page: page number to read
1055 * 1042 *
1056 * Not for syndrome calculating ecc controllers, which use a special oob layout 1043 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1057 */ 1044 */
1058static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1045static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1059 uint8_t *buf, int page) 1046 uint8_t *buf, int page)
@@ -1064,11 +1051,11 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1064} 1051}
1065 1052
1066/** 1053/**
1067 * nand_read_page_raw_syndrome - [Intern] read raw page data without ecc 1054 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
1068 * @mtd: mtd info structure 1055 * @mtd: mtd info structure
1069 * @chip: nand chip info structure 1056 * @chip: nand chip info structure
1070 * @buf: buffer to store read data 1057 * @buf: buffer to store read data
1071 * @page: page number to read 1058 * @page: page number to read
1072 * 1059 *
1073 * We need a special oob layout and handling even when OOB isn't used. 1060 * We need a special oob layout and handling even when OOB isn't used.
1074 */ 1061 */
@@ -1107,11 +1094,11 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1107} 1094}
1108 1095
1109/** 1096/**
1110 * nand_read_page_swecc - [REPLACABLE] software ecc based page read function 1097 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
1111 * @mtd: mtd info structure 1098 * @mtd: mtd info structure
1112 * @chip: nand chip info structure 1099 * @chip: nand chip info structure
1113 * @buf: buffer to store read data 1100 * @buf: buffer to store read data
1114 * @page: page number to read 1101 * @page: page number to read
1115 */ 1102 */
1116static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 1103static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1117 uint8_t *buf, int page) 1104 uint8_t *buf, int page)
@@ -1148,12 +1135,12 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1148} 1135}
1149 1136
1150/** 1137/**
1151 * nand_read_subpage - [REPLACABLE] software ecc based sub-page read function 1138 * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function
1152 * @mtd: mtd info structure 1139 * @mtd: mtd info structure
1153 * @chip: nand chip info structure 1140 * @chip: nand chip info structure
1154 * @data_offs: offset of requested data within the page 1141 * @data_offs: offset of requested data within the page
1155 * @readlen: data length 1142 * @readlen: data length
1156 * @bufpoi: buffer to store read data 1143 * @bufpoi: buffer to store read data
1157 */ 1144 */
1158static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, 1145static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1159 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi) 1146 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
@@ -1166,12 +1153,12 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1166 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1; 1153 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1167 int index = 0; 1154 int index = 0;
1168 1155
1169 /* Column address wihin the page aligned to ECC size (256bytes). */ 1156 /* Column address within the page aligned to ECC size (256bytes) */
1170 start_step = data_offs / chip->ecc.size; 1157 start_step = data_offs / chip->ecc.size;
1171 end_step = (data_offs + readlen - 1) / chip->ecc.size; 1158 end_step = (data_offs + readlen - 1) / chip->ecc.size;
1172 num_steps = end_step - start_step + 1; 1159 num_steps = end_step - start_step + 1;
1173 1160
1174 /* Data size aligned to ECC ecc.size*/ 1161 /* Data size aligned to ECC ecc.size */
1175 datafrag_len = num_steps * chip->ecc.size; 1162 datafrag_len = num_steps * chip->ecc.size;
1176 eccfrag_len = num_steps * chip->ecc.bytes; 1163 eccfrag_len = num_steps * chip->ecc.bytes;
1177 1164
@@ -1183,13 +1170,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1183 p = bufpoi + data_col_addr; 1170 p = bufpoi + data_col_addr;
1184 chip->read_buf(mtd, p, datafrag_len); 1171 chip->read_buf(mtd, p, datafrag_len);
1185 1172
1186 /* Calculate ECC */ 1173 /* Calculate ECC */
1187 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) 1174 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1188 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]); 1175 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1189 1176
1190 /* The performance is faster if to position offsets 1177 /*
1191 according to ecc.pos. Let make sure here that 1178 * The performance is faster if we position offsets according to
1192 there are no gaps in ecc positions */ 1179 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
1180 */
1193 for (i = 0; i < eccfrag_len - 1; i++) { 1181 for (i = 0; i < eccfrag_len - 1; i++) {
1194 if (eccpos[i + start_step * chip->ecc.bytes] + 1 != 1182 if (eccpos[i + start_step * chip->ecc.bytes] + 1 !=
1195 eccpos[i + start_step * chip->ecc.bytes + 1]) { 1183 eccpos[i + start_step * chip->ecc.bytes + 1]) {
@@ -1201,8 +1189,10 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1201 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); 1189 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
1202 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1190 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1203 } else { 1191 } else {
1204 /* send the command to read the particular ecc bytes */ 1192 /*
1205 /* take care about buswidth alignment in read_buf */ 1193 * Send the command to read the particular ECC bytes take care
1194 * about buswidth alignment in read_buf.
1195 */
1206 index = start_step * chip->ecc.bytes; 1196 index = start_step * chip->ecc.bytes;
1207 1197
1208 aligned_pos = eccpos[index] & ~(busw - 1); 1198 aligned_pos = eccpos[index] & ~(busw - 1);
@@ -1235,13 +1225,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1235} 1225}
1236 1226
1237/** 1227/**
1238 * nand_read_page_hwecc - [REPLACABLE] hardware ecc based page read function 1228 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
1239 * @mtd: mtd info structure 1229 * @mtd: mtd info structure
1240 * @chip: nand chip info structure 1230 * @chip: nand chip info structure
1241 * @buf: buffer to store read data 1231 * @buf: buffer to store read data
1242 * @page: page number to read 1232 * @page: page number to read
1243 * 1233 *
1244 * Not for syndrome calculating ecc controllers which need a special oob layout 1234 * Not for syndrome calculating ECC controllers which need a special oob layout.
1245 */ 1235 */
1246static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 1236static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1247 uint8_t *buf, int page) 1237 uint8_t *buf, int page)
@@ -1280,18 +1270,17 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1280} 1270}
1281 1271
1282/** 1272/**
1283 * nand_read_page_hwecc_oob_first - [REPLACABLE] hw ecc, read oob first 1273 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
1284 * @mtd: mtd info structure 1274 * @mtd: mtd info structure
1285 * @chip: nand chip info structure 1275 * @chip: nand chip info structure
1286 * @buf: buffer to store read data 1276 * @buf: buffer to store read data
1287 * @page: page number to read 1277 * @page: page number to read
1288 * 1278 *
1289 * Hardware ECC for large page chips, require OOB to be read first. 1279 * Hardware ECC for large page chips, require OOB to be read first. For this
1290 * For this ECC mode, the write_page method is re-used from ECC_HW. 1280 * ECC mode, the write_page method is re-used from ECC_HW. These methods
1291 * These methods read/write ECC from the OOB area, unlike the 1281 * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
1292 * ECC_HW_SYNDROME support with multiple ECC steps, follows the 1282 * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
1293 * "infix ECC" scheme and reads/writes ECC from the data area, by 1283 * the data area, by overwriting the NAND manufacturer bad block markings.
1294 * overwriting the NAND manufacturer bad block markings.
1295 */ 1284 */
1296static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, 1285static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1297 struct nand_chip *chip, uint8_t *buf, int page) 1286 struct nand_chip *chip, uint8_t *buf, int page)
@@ -1329,14 +1318,14 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1329} 1318}
1330 1319
1331/** 1320/**
1332 * nand_read_page_syndrome - [REPLACABLE] hardware ecc syndrom based page read 1321 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
1333 * @mtd: mtd info structure 1322 * @mtd: mtd info structure
1334 * @chip: nand chip info structure 1323 * @chip: nand chip info structure
1335 * @buf: buffer to store read data 1324 * @buf: buffer to store read data
1336 * @page: page number to read 1325 * @page: page number to read
1337 * 1326 *
1338 * The hw generator calculates the error syndrome automatically. Therefor 1327 * The hw generator calculates the error syndrome automatically. Therefore we
1339 * we need a special oob layout and handling. 1328 * need a special oob layout and handling.
1340 */ 1329 */
1341static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1330static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1342 uint8_t *buf, int page) 1331 uint8_t *buf, int page)
@@ -1384,29 +1373,29 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1384} 1373}
1385 1374
1386/** 1375/**
1387 * nand_transfer_oob - [Internal] Transfer oob to client buffer 1376 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
1388 * @chip: nand chip structure 1377 * @chip: nand chip structure
1389 * @oob: oob destination address 1378 * @oob: oob destination address
1390 * @ops: oob ops structure 1379 * @ops: oob ops structure
1391 * @len: size of oob to transfer 1380 * @len: size of oob to transfer
1392 */ 1381 */
1393static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, 1382static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
1394 struct mtd_oob_ops *ops, size_t len) 1383 struct mtd_oob_ops *ops, size_t len)
1395{ 1384{
1396 switch (ops->mode) { 1385 switch (ops->mode) {
1397 1386
1398 case MTD_OOB_PLACE: 1387 case MTD_OPS_PLACE_OOB:
1399 case MTD_OOB_RAW: 1388 case MTD_OPS_RAW:
1400 memcpy(oob, chip->oob_poi + ops->ooboffs, len); 1389 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
1401 return oob + len; 1390 return oob + len;
1402 1391
1403 case MTD_OOB_AUTO: { 1392 case MTD_OPS_AUTO_OOB: {
1404 struct nand_oobfree *free = chip->ecc.layout->oobfree; 1393 struct nand_oobfree *free = chip->ecc.layout->oobfree;
1405 uint32_t boffs = 0, roffs = ops->ooboffs; 1394 uint32_t boffs = 0, roffs = ops->ooboffs;
1406 size_t bytes = 0; 1395 size_t bytes = 0;
1407 1396
1408 for (; free->length && len; free++, len -= bytes) { 1397 for (; free->length && len; free++, len -= bytes) {
1409 /* Read request not from offset 0 ? */ 1398 /* Read request not from offset 0? */
1410 if (unlikely(roffs)) { 1399 if (unlikely(roffs)) {
1411 if (roffs >= free->length) { 1400 if (roffs >= free->length) {
1412 roffs -= free->length; 1401 roffs -= free->length;
@@ -1432,11 +1421,10 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
1432} 1421}
1433 1422
1434/** 1423/**
1435 * nand_do_read_ops - [Internal] Read data with ECC 1424 * nand_do_read_ops - [INTERN] Read data with ECC
1436 * 1425 * @mtd: MTD device structure
1437 * @mtd: MTD device structure 1426 * @from: offset to read from
1438 * @from: offset to read from 1427 * @ops: oob ops structure
1439 * @ops: oob ops structure
1440 * 1428 *
1441 * Internal function. Called with chip held. 1429 * Internal function. Called with chip held.
1442 */ 1430 */
@@ -1451,7 +1439,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1451 int ret = 0; 1439 int ret = 0;
1452 uint32_t readlen = ops->len; 1440 uint32_t readlen = ops->len;
1453 uint32_t oobreadlen = ops->ooblen; 1441 uint32_t oobreadlen = ops->ooblen;
1454 uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ? 1442 uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
1455 mtd->oobavail : mtd->oobsize; 1443 mtd->oobavail : mtd->oobsize;
1456 1444
1457 uint8_t *bufpoi, *oob, *buf; 1445 uint8_t *bufpoi, *oob, *buf;
@@ -1473,7 +1461,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1473 bytes = min(mtd->writesize - col, readlen); 1461 bytes = min(mtd->writesize - col, readlen);
1474 aligned = (bytes == mtd->writesize); 1462 aligned = (bytes == mtd->writesize);
1475 1463
1476 /* Is the current page in the buffer ? */ 1464 /* Is the current page in the buffer? */
1477 if (realpage != chip->pagebuf || oob) { 1465 if (realpage != chip->pagebuf || oob) {
1478 bufpoi = aligned ? buf : chip->buffers->databuf; 1466 bufpoi = aligned ? buf : chip->buffers->databuf;
1479 1467
@@ -1483,7 +1471,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1483 } 1471 }
1484 1472
1485 /* Now read the page into the buffer */ 1473 /* Now read the page into the buffer */
1486 if (unlikely(ops->mode == MTD_OOB_RAW)) 1474 if (unlikely(ops->mode == MTD_OPS_RAW))
1487 ret = chip->ecc.read_page_raw(mtd, chip, 1475 ret = chip->ecc.read_page_raw(mtd, chip,
1488 bufpoi, page); 1476 bufpoi, page);
1489 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob) 1477 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
@@ -1492,14 +1480,22 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1492 else 1480 else
1493 ret = chip->ecc.read_page(mtd, chip, bufpoi, 1481 ret = chip->ecc.read_page(mtd, chip, bufpoi,
1494 page); 1482 page);
1495 if (ret < 0) 1483 if (ret < 0) {
1484 if (!aligned)
1485 /* Invalidate page cache */
1486 chip->pagebuf = -1;
1496 break; 1487 break;
1488 }
1497 1489
1498 /* Transfer not aligned data */ 1490 /* Transfer not aligned data */
1499 if (!aligned) { 1491 if (!aligned) {
1500 if (!NAND_SUBPAGE_READ(chip) && !oob && 1492 if (!NAND_SUBPAGE_READ(chip) && !oob &&
1501 !(mtd->ecc_stats.failed - stats.failed)) 1493 !(mtd->ecc_stats.failed - stats.failed) &&
1494 (ops->mode != MTD_OPS_RAW))
1502 chip->pagebuf = realpage; 1495 chip->pagebuf = realpage;
1496 else
1497 /* Invalidate page cache */
1498 chip->pagebuf = -1;
1503 memcpy(buf, chip->buffers->databuf + col, bytes); 1499 memcpy(buf, chip->buffers->databuf + col, bytes);
1504 } 1500 }
1505 1501
@@ -1539,7 +1535,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1539 if (!readlen) 1535 if (!readlen)
1540 break; 1536 break;
1541 1537
1542 /* For subsequent reads align to page boundary. */ 1538 /* For subsequent reads align to page boundary */
1543 col = 0; 1539 col = 0;
1544 /* Increment page address */ 1540 /* Increment page address */
1545 realpage++; 1541 realpage++;
@@ -1552,8 +1548,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1552 chip->select_chip(mtd, chipnr); 1548 chip->select_chip(mtd, chipnr);
1553 } 1549 }
1554 1550
1555 /* Check, if the chip supports auto page increment 1551 /*
1556 * or if we have hit a block boundary. 1552 * Check, if the chip supports auto page increment or if we
1553 * have hit a block boundary.
1557 */ 1554 */
1558 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck)) 1555 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
1559 sndcmd = 1; 1556 sndcmd = 1;
@@ -1574,18 +1571,19 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1574 1571
1575/** 1572/**
1576 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc 1573 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
1577 * @mtd: MTD device structure 1574 * @mtd: MTD device structure
1578 * @from: offset to read from 1575 * @from: offset to read from
1579 * @len: number of bytes to read 1576 * @len: number of bytes to read
1580 * @retlen: pointer to variable to store the number of read bytes 1577 * @retlen: pointer to variable to store the number of read bytes
1581 * @buf: the databuffer to put data 1578 * @buf: the databuffer to put data
1582 * 1579 *
1583 * Get hold of the chip and call nand_do_read 1580 * Get hold of the chip and call nand_do_read.
1584 */ 1581 */
1585static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, 1582static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1586 size_t *retlen, uint8_t *buf) 1583 size_t *retlen, uint8_t *buf)
1587{ 1584{
1588 struct nand_chip *chip = mtd->priv; 1585 struct nand_chip *chip = mtd->priv;
1586 struct mtd_oob_ops ops;
1589 int ret; 1587 int ret;
1590 1588
1591 /* Do not allow reads past end of device */ 1589 /* Do not allow reads past end of device */
@@ -1596,13 +1594,14 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1596 1594
1597 nand_get_device(chip, mtd, FL_READING); 1595 nand_get_device(chip, mtd, FL_READING);
1598 1596
1599 chip->ops.len = len; 1597 ops.len = len;
1600 chip->ops.datbuf = buf; 1598 ops.datbuf = buf;
1601 chip->ops.oobbuf = NULL; 1599 ops.oobbuf = NULL;
1600 ops.mode = 0;
1602 1601
1603 ret = nand_do_read_ops(mtd, from, &chip->ops); 1602 ret = nand_do_read_ops(mtd, from, &ops);
1604 1603
1605 *retlen = chip->ops.retlen; 1604 *retlen = ops.retlen;
1606 1605
1607 nand_release_device(mtd); 1606 nand_release_device(mtd);
1608 1607
@@ -1610,11 +1609,11 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1610} 1609}
1611 1610
1612/** 1611/**
1613 * nand_read_oob_std - [REPLACABLE] the most common OOB data read function 1612 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
1614 * @mtd: mtd info structure 1613 * @mtd: mtd info structure
1615 * @chip: nand chip info structure 1614 * @chip: nand chip info structure
1616 * @page: page number to read 1615 * @page: page number to read
1617 * @sndcmd: flag whether to issue read command or not 1616 * @sndcmd: flag whether to issue read command or not
1618 */ 1617 */
1619static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 1618static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1620 int page, int sndcmd) 1619 int page, int sndcmd)
@@ -1628,12 +1627,12 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1628} 1627}
1629 1628
1630/** 1629/**
1631 * nand_read_oob_syndrome - [REPLACABLE] OOB data read function for HW ECC 1630 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
1632 * with syndromes 1631 * with syndromes
1633 * @mtd: mtd info structure 1632 * @mtd: mtd info structure
1634 * @chip: nand chip info structure 1633 * @chip: nand chip info structure
1635 * @page: page number to read 1634 * @page: page number to read
1636 * @sndcmd: flag whether to issue read command or not 1635 * @sndcmd: flag whether to issue read command or not
1637 */ 1636 */
1638static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1637static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1639 int page, int sndcmd) 1638 int page, int sndcmd)
@@ -1667,10 +1666,10 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1667} 1666}
1668 1667
1669/** 1668/**
1670 * nand_write_oob_std - [REPLACABLE] the most common OOB data write function 1669 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
1671 * @mtd: mtd info structure 1670 * @mtd: mtd info structure
1672 * @chip: nand chip info structure 1671 * @chip: nand chip info structure
1673 * @page: page number to write 1672 * @page: page number to write
1674 */ 1673 */
1675static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 1674static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1676 int page) 1675 int page)
@@ -1690,11 +1689,11 @@ static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1690} 1689}
1691 1690
1692/** 1691/**
1693 * nand_write_oob_syndrome - [REPLACABLE] OOB data write function for HW ECC 1692 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
1694 * with syndrome - only for large page flash ! 1693 * with syndrome - only for large page flash
1695 * @mtd: mtd info structure 1694 * @mtd: mtd info structure
1696 * @chip: nand chip info structure 1695 * @chip: nand chip info structure
1697 * @page: page number to write 1696 * @page: page number to write
1698 */ 1697 */
1699static int nand_write_oob_syndrome(struct mtd_info *mtd, 1698static int nand_write_oob_syndrome(struct mtd_info *mtd,
1700 struct nand_chip *chip, int page) 1699 struct nand_chip *chip, int page)
@@ -1749,34 +1748,37 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
1749} 1748}
1750 1749
1751/** 1750/**
1752 * nand_do_read_oob - [Intern] NAND read out-of-band 1751 * nand_do_read_oob - [INTERN] NAND read out-of-band
1753 * @mtd: MTD device structure 1752 * @mtd: MTD device structure
1754 * @from: offset to read from 1753 * @from: offset to read from
1755 * @ops: oob operations description structure 1754 * @ops: oob operations description structure
1756 * 1755 *
1757 * NAND read out-of-band data from the spare area 1756 * NAND read out-of-band data from the spare area.
1758 */ 1757 */
1759static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, 1758static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1760 struct mtd_oob_ops *ops) 1759 struct mtd_oob_ops *ops)
1761{ 1760{
1762 int page, realpage, chipnr, sndcmd = 1; 1761 int page, realpage, chipnr, sndcmd = 1;
1763 struct nand_chip *chip = mtd->priv; 1762 struct nand_chip *chip = mtd->priv;
1763 struct mtd_ecc_stats stats;
1764 int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; 1764 int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
1765 int readlen = ops->ooblen; 1765 int readlen = ops->ooblen;
1766 int len; 1766 int len;
1767 uint8_t *buf = ops->oobbuf; 1767 uint8_t *buf = ops->oobbuf;
1768 1768
1769 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08Lx, len = %i\n", 1769 pr_debug("%s: from = 0x%08Lx, len = %i\n",
1770 __func__, (unsigned long long)from, readlen); 1770 __func__, (unsigned long long)from, readlen);
1771 1771
1772 if (ops->mode == MTD_OOB_AUTO) 1772 stats = mtd->ecc_stats;
1773
1774 if (ops->mode == MTD_OPS_AUTO_OOB)
1773 len = chip->ecc.layout->oobavail; 1775 len = chip->ecc.layout->oobavail;
1774 else 1776 else
1775 len = mtd->oobsize; 1777 len = mtd->oobsize;
1776 1778
1777 if (unlikely(ops->ooboffs >= len)) { 1779 if (unlikely(ops->ooboffs >= len)) {
1778 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start read " 1780 pr_debug("%s: attempt to start read outside oob\n",
1779 "outside oob\n", __func__); 1781 __func__);
1780 return -EINVAL; 1782 return -EINVAL;
1781 } 1783 }
1782 1784
@@ -1784,8 +1786,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1784 if (unlikely(from >= mtd->size || 1786 if (unlikely(from >= mtd->size ||
1785 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) - 1787 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
1786 (from >> chip->page_shift)) * len)) { 1788 (from >> chip->page_shift)) * len)) {
1787 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read beyond end " 1789 pr_debug("%s: attempt to read beyond end of device\n",
1788 "of device\n", __func__); 1790 __func__);
1789 return -EINVAL; 1791 return -EINVAL;
1790 } 1792 }
1791 1793
@@ -1797,7 +1799,10 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1797 page = realpage & chip->pagemask; 1799 page = realpage & chip->pagemask;
1798 1800
1799 while (1) { 1801 while (1) {
1800 sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd); 1802 if (ops->mode == MTD_OPS_RAW)
1803 sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd);
1804 else
1805 sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
1801 1806
1802 len = min(len, readlen); 1807 len = min(len, readlen);
1803 buf = nand_transfer_oob(chip, buf, ops, len); 1808 buf = nand_transfer_oob(chip, buf, ops, len);
@@ -1830,24 +1835,29 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1830 chip->select_chip(mtd, chipnr); 1835 chip->select_chip(mtd, chipnr);
1831 } 1836 }
1832 1837
1833 /* Check, if the chip supports auto page increment 1838 /*
1834 * or if we have hit a block boundary. 1839 * Check, if the chip supports auto page increment or if we
1840 * have hit a block boundary.
1835 */ 1841 */
1836 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck)) 1842 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
1837 sndcmd = 1; 1843 sndcmd = 1;
1838 } 1844 }
1839 1845
1840 ops->oobretlen = ops->ooblen; 1846 ops->oobretlen = ops->ooblen;
1841 return 0; 1847
1848 if (mtd->ecc_stats.failed - stats.failed)
1849 return -EBADMSG;
1850
1851 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
1842} 1852}
1843 1853
1844/** 1854/**
1845 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band 1855 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
1846 * @mtd: MTD device structure 1856 * @mtd: MTD device structure
1847 * @from: offset to read from 1857 * @from: offset to read from
1848 * @ops: oob operation description structure 1858 * @ops: oob operation description structure
1849 * 1859 *
1850 * NAND read data and/or out-of-band data 1860 * NAND read data and/or out-of-band data.
1851 */ 1861 */
1852static int nand_read_oob(struct mtd_info *mtd, loff_t from, 1862static int nand_read_oob(struct mtd_info *mtd, loff_t from,
1853 struct mtd_oob_ops *ops) 1863 struct mtd_oob_ops *ops)
@@ -1859,17 +1869,17 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
1859 1869
1860 /* Do not allow reads past end of device */ 1870 /* Do not allow reads past end of device */
1861 if (ops->datbuf && (from + ops->len) > mtd->size) { 1871 if (ops->datbuf && (from + ops->len) > mtd->size) {
1862 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read " 1872 pr_debug("%s: attempt to read beyond end of device\n",
1863 "beyond end of device\n", __func__); 1873 __func__);
1864 return -EINVAL; 1874 return -EINVAL;
1865 } 1875 }
1866 1876
1867 nand_get_device(chip, mtd, FL_READING); 1877 nand_get_device(chip, mtd, FL_READING);
1868 1878
1869 switch (ops->mode) { 1879 switch (ops->mode) {
1870 case MTD_OOB_PLACE: 1880 case MTD_OPS_PLACE_OOB:
1871 case MTD_OOB_AUTO: 1881 case MTD_OPS_AUTO_OOB:
1872 case MTD_OOB_RAW: 1882 case MTD_OPS_RAW:
1873 break; 1883 break;
1874 1884
1875 default: 1885 default:
@@ -1888,12 +1898,12 @@ out:
1888 1898
1889 1899
1890/** 1900/**
1891 * nand_write_page_raw - [Intern] raw page write function 1901 * nand_write_page_raw - [INTERN] raw page write function
1892 * @mtd: mtd info structure 1902 * @mtd: mtd info structure
1893 * @chip: nand chip info structure 1903 * @chip: nand chip info structure
1894 * @buf: data buffer 1904 * @buf: data buffer
1895 * 1905 *
1896 * Not for syndrome calculating ecc controllers, which use a special oob layout 1906 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1897 */ 1907 */
1898static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1908static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1899 const uint8_t *buf) 1909 const uint8_t *buf)
@@ -1903,10 +1913,10 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1903} 1913}
1904 1914
1905/** 1915/**
1906 * nand_write_page_raw_syndrome - [Intern] raw page write function 1916 * nand_write_page_raw_syndrome - [INTERN] raw page write function
1907 * @mtd: mtd info structure 1917 * @mtd: mtd info structure
1908 * @chip: nand chip info structure 1918 * @chip: nand chip info structure
1909 * @buf: data buffer 1919 * @buf: data buffer
1910 * 1920 *
1911 * We need a special oob layout and handling even when ECC isn't checked. 1921 * We need a special oob layout and handling even when ECC isn't checked.
1912 */ 1922 */
@@ -1942,10 +1952,10 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
1942 chip->write_buf(mtd, oob, size); 1952 chip->write_buf(mtd, oob, size);
1943} 1953}
1944/** 1954/**
1945 * nand_write_page_swecc - [REPLACABLE] software ecc based page write function 1955 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
1946 * @mtd: mtd info structure 1956 * @mtd: mtd info structure
1947 * @chip: nand chip info structure 1957 * @chip: nand chip info structure
1948 * @buf: data buffer 1958 * @buf: data buffer
1949 */ 1959 */
1950static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 1960static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1951 const uint8_t *buf) 1961 const uint8_t *buf)
@@ -1957,7 +1967,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1957 const uint8_t *p = buf; 1967 const uint8_t *p = buf;
1958 uint32_t *eccpos = chip->ecc.layout->eccpos; 1968 uint32_t *eccpos = chip->ecc.layout->eccpos;
1959 1969
1960 /* Software ecc calculation */ 1970 /* Software ECC calculation */
1961 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 1971 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1962 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 1972 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1963 1973
@@ -1968,10 +1978,10 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1968} 1978}
1969 1979
1970/** 1980/**
1971 * nand_write_page_hwecc - [REPLACABLE] hardware ecc based page write function 1981 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
1972 * @mtd: mtd info structure 1982 * @mtd: mtd info structure
1973 * @chip: nand chip info structure 1983 * @chip: nand chip info structure
1974 * @buf: data buffer 1984 * @buf: data buffer
1975 */ 1985 */
1976static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 1986static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1977 const uint8_t *buf) 1987 const uint8_t *buf)
@@ -1996,13 +2006,13 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1996} 2006}
1997 2007
1998/** 2008/**
1999 * nand_write_page_syndrome - [REPLACABLE] hardware ecc syndrom based page write 2009 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
2000 * @mtd: mtd info structure 2010 * @mtd: mtd info structure
2001 * @chip: nand chip info structure 2011 * @chip: nand chip info structure
2002 * @buf: data buffer 2012 * @buf: data buffer
2003 * 2013 *
2004 * The hw generator calculates the error syndrome automatically. Therefor 2014 * The hw generator calculates the error syndrome automatically. Therefore we
2005 * we need a special oob layout and handling. 2015 * need a special oob layout and handling.
2006 */ 2016 */
2007static void nand_write_page_syndrome(struct mtd_info *mtd, 2017static void nand_write_page_syndrome(struct mtd_info *mtd,
2008 struct nand_chip *chip, const uint8_t *buf) 2018 struct nand_chip *chip, const uint8_t *buf)
@@ -2041,12 +2051,12 @@ static void nand_write_page_syndrome(struct mtd_info *mtd,
2041 2051
2042/** 2052/**
2043 * nand_write_page - [REPLACEABLE] write one page 2053 * nand_write_page - [REPLACEABLE] write one page
2044 * @mtd: MTD device structure 2054 * @mtd: MTD device structure
2045 * @chip: NAND chip descriptor 2055 * @chip: NAND chip descriptor
2046 * @buf: the data to write 2056 * @buf: the data to write
2047 * @page: page number to write 2057 * @page: page number to write
2048 * @cached: cached programming 2058 * @cached: cached programming
2049 * @raw: use _raw version of write_page 2059 * @raw: use _raw version of write_page
2050 */ 2060 */
2051static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, 2061static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2052 const uint8_t *buf, int page, int cached, int raw) 2062 const uint8_t *buf, int page, int cached, int raw)
@@ -2061,8 +2071,8 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2061 chip->ecc.write_page(mtd, chip, buf); 2071 chip->ecc.write_page(mtd, chip, buf);
2062 2072
2063 /* 2073 /*
2064 * Cached progamming disabled for now, Not sure if its worth the 2074 * Cached progamming disabled for now. Not sure if it's worth the
2065 * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s) 2075 * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s).
2066 */ 2076 */
2067 cached = 0; 2077 cached = 0;
2068 2078
@@ -2072,7 +2082,7 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2072 status = chip->waitfunc(mtd, chip); 2082 status = chip->waitfunc(mtd, chip);
2073 /* 2083 /*
2074 * See if operation failed and additional status checks are 2084 * See if operation failed and additional status checks are
2075 * available 2085 * available.
2076 */ 2086 */
2077 if ((status & NAND_STATUS_FAIL) && (chip->errstat)) 2087 if ((status & NAND_STATUS_FAIL) && (chip->errstat))
2078 status = chip->errstat(mtd, chip, FL_WRITING, status, 2088 status = chip->errstat(mtd, chip, FL_WRITING, status,
@@ -2096,29 +2106,37 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2096} 2106}
2097 2107
2098/** 2108/**
2099 * nand_fill_oob - [Internal] Transfer client buffer to oob 2109 * nand_fill_oob - [INTERN] Transfer client buffer to oob
2100 * @chip: nand chip structure 2110 * @mtd: MTD device structure
2101 * @oob: oob data buffer 2111 * @oob: oob data buffer
2102 * @len: oob data write length 2112 * @len: oob data write length
2103 * @ops: oob ops structure 2113 * @ops: oob ops structure
2104 */ 2114 */
2105static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len, 2115static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2106 struct mtd_oob_ops *ops) 2116 struct mtd_oob_ops *ops)
2107{ 2117{
2118 struct nand_chip *chip = mtd->priv;
2119
2120 /*
2121 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2122 * data from a previous OOB read.
2123 */
2124 memset(chip->oob_poi, 0xff, mtd->oobsize);
2125
2108 switch (ops->mode) { 2126 switch (ops->mode) {
2109 2127
2110 case MTD_OOB_PLACE: 2128 case MTD_OPS_PLACE_OOB:
2111 case MTD_OOB_RAW: 2129 case MTD_OPS_RAW:
2112 memcpy(chip->oob_poi + ops->ooboffs, oob, len); 2130 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2113 return oob + len; 2131 return oob + len;
2114 2132
2115 case MTD_OOB_AUTO: { 2133 case MTD_OPS_AUTO_OOB: {
2116 struct nand_oobfree *free = chip->ecc.layout->oobfree; 2134 struct nand_oobfree *free = chip->ecc.layout->oobfree;
2117 uint32_t boffs = 0, woffs = ops->ooboffs; 2135 uint32_t boffs = 0, woffs = ops->ooboffs;
2118 size_t bytes = 0; 2136 size_t bytes = 0;
2119 2137
2120 for (; free->length && len; free++, len -= bytes) { 2138 for (; free->length && len; free++, len -= bytes) {
2121 /* Write request not from offset 0 ? */ 2139 /* Write request not from offset 0? */
2122 if (unlikely(woffs)) { 2140 if (unlikely(woffs)) {
2123 if (woffs >= free->length) { 2141 if (woffs >= free->length) {
2124 woffs -= free->length; 2142 woffs -= free->length;
@@ -2146,12 +2164,12 @@ static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
2146#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0) 2164#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
2147 2165
2148/** 2166/**
2149 * nand_do_write_ops - [Internal] NAND write with ECC 2167 * nand_do_write_ops - [INTERN] NAND write with ECC
2150 * @mtd: MTD device structure 2168 * @mtd: MTD device structure
2151 * @to: offset to write to 2169 * @to: offset to write to
2152 * @ops: oob operations description structure 2170 * @ops: oob operations description structure
2153 * 2171 *
2154 * NAND write with ECC 2172 * NAND write with ECC.
2155 */ 2173 */
2156static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, 2174static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2157 struct mtd_oob_ops *ops) 2175 struct mtd_oob_ops *ops)
@@ -2161,7 +2179,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2161 uint32_t writelen = ops->len; 2179 uint32_t writelen = ops->len;
2162 2180
2163 uint32_t oobwritelen = ops->ooblen; 2181 uint32_t oobwritelen = ops->ooblen;
2164 uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ? 2182 uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
2165 mtd->oobavail : mtd->oobsize; 2183 mtd->oobavail : mtd->oobsize;
2166 2184
2167 uint8_t *oob = ops->oobbuf; 2185 uint8_t *oob = ops->oobbuf;
@@ -2172,10 +2190,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2172 if (!writelen) 2190 if (!writelen)
2173 return 0; 2191 return 0;
2174 2192
2175 /* reject writes, which are not page aligned */ 2193 /* Reject writes, which are not page aligned */
2176 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) { 2194 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
2177 printk(KERN_NOTICE "%s: Attempt to write not " 2195 pr_notice("%s: attempt to write non page aligned data\n",
2178 "page aligned data\n", __func__); 2196 __func__);
2179 return -EINVAL; 2197 return -EINVAL;
2180 } 2198 }
2181 2199
@@ -2201,10 +2219,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2201 (chip->pagebuf << chip->page_shift) < (to + ops->len)) 2219 (chip->pagebuf << chip->page_shift) < (to + ops->len))
2202 chip->pagebuf = -1; 2220 chip->pagebuf = -1;
2203 2221
2204 /* If we're not given explicit OOB data, let it be 0xFF */
2205 if (likely(!oob))
2206 memset(chip->oob_poi, 0xff, mtd->oobsize);
2207
2208 /* Don't allow multipage oob writes with offset */ 2222 /* Don't allow multipage oob writes with offset */
2209 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) 2223 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
2210 return -EINVAL; 2224 return -EINVAL;
@@ -2214,7 +2228,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2214 int cached = writelen > bytes && page != blockmask; 2228 int cached = writelen > bytes && page != blockmask;
2215 uint8_t *wbuf = buf; 2229 uint8_t *wbuf = buf;
2216 2230
2217 /* Partial page write ? */ 2231 /* Partial page write? */
2218 if (unlikely(column || writelen < (mtd->writesize - 1))) { 2232 if (unlikely(column || writelen < (mtd->writesize - 1))) {
2219 cached = 0; 2233 cached = 0;
2220 bytes = min_t(int, bytes - column, (int) writelen); 2234 bytes = min_t(int, bytes - column, (int) writelen);
@@ -2226,12 +2240,15 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2226 2240
2227 if (unlikely(oob)) { 2241 if (unlikely(oob)) {
2228 size_t len = min(oobwritelen, oobmaxlen); 2242 size_t len = min(oobwritelen, oobmaxlen);
2229 oob = nand_fill_oob(chip, oob, len, ops); 2243 oob = nand_fill_oob(mtd, oob, len, ops);
2230 oobwritelen -= len; 2244 oobwritelen -= len;
2245 } else {
2246 /* We still need to erase leftover OOB data */
2247 memset(chip->oob_poi, 0xff, mtd->oobsize);
2231 } 2248 }
2232 2249
2233 ret = chip->write_page(mtd, chip, wbuf, page, cached, 2250 ret = chip->write_page(mtd, chip, wbuf, page, cached,
2234 (ops->mode == MTD_OOB_RAW)); 2251 (ops->mode == MTD_OPS_RAW));
2235 if (ret) 2252 if (ret)
2236 break; 2253 break;
2237 2254
@@ -2260,11 +2277,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2260 2277
2261/** 2278/**
2262 * panic_nand_write - [MTD Interface] NAND write with ECC 2279 * panic_nand_write - [MTD Interface] NAND write with ECC
2263 * @mtd: MTD device structure 2280 * @mtd: MTD device structure
2264 * @to: offset to write to 2281 * @to: offset to write to
2265 * @len: number of bytes to write 2282 * @len: number of bytes to write
2266 * @retlen: pointer to variable to store the number of written bytes 2283 * @retlen: pointer to variable to store the number of written bytes
2267 * @buf: the data to write 2284 * @buf: the data to write
2268 * 2285 *
2269 * NAND write with ECC. Used when performing writes in interrupt context, this 2286 * NAND write with ECC. Used when performing writes in interrupt context, this
2270 * may for example be called by mtdoops when writing an oops while in panic. 2287 * may for example be called by mtdoops when writing an oops while in panic.
@@ -2273,6 +2290,7 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2273 size_t *retlen, const uint8_t *buf) 2290 size_t *retlen, const uint8_t *buf)
2274{ 2291{
2275 struct nand_chip *chip = mtd->priv; 2292 struct nand_chip *chip = mtd->priv;
2293 struct mtd_oob_ops ops;
2276 int ret; 2294 int ret;
2277 2295
2278 /* Do not allow reads past end of device */ 2296 /* Do not allow reads past end of device */
@@ -2281,36 +2299,38 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2281 if (!len) 2299 if (!len)
2282 return 0; 2300 return 0;
2283 2301
2284 /* Wait for the device to get ready. */ 2302 /* Wait for the device to get ready */
2285 panic_nand_wait(mtd, chip, 400); 2303 panic_nand_wait(mtd, chip, 400);
2286 2304
2287 /* Grab the device. */ 2305 /* Grab the device */
2288 panic_nand_get_device(chip, mtd, FL_WRITING); 2306 panic_nand_get_device(chip, mtd, FL_WRITING);
2289 2307
2290 chip->ops.len = len; 2308 ops.len = len;
2291 chip->ops.datbuf = (uint8_t *)buf; 2309 ops.datbuf = (uint8_t *)buf;
2292 chip->ops.oobbuf = NULL; 2310 ops.oobbuf = NULL;
2311 ops.mode = 0;
2293 2312
2294 ret = nand_do_write_ops(mtd, to, &chip->ops); 2313 ret = nand_do_write_ops(mtd, to, &ops);
2295 2314
2296 *retlen = chip->ops.retlen; 2315 *retlen = ops.retlen;
2297 return ret; 2316 return ret;
2298} 2317}
2299 2318
2300/** 2319/**
2301 * nand_write - [MTD Interface] NAND write with ECC 2320 * nand_write - [MTD Interface] NAND write with ECC
2302 * @mtd: MTD device structure 2321 * @mtd: MTD device structure
2303 * @to: offset to write to 2322 * @to: offset to write to
2304 * @len: number of bytes to write 2323 * @len: number of bytes to write
2305 * @retlen: pointer to variable to store the number of written bytes 2324 * @retlen: pointer to variable to store the number of written bytes
2306 * @buf: the data to write 2325 * @buf: the data to write
2307 * 2326 *
2308 * NAND write with ECC 2327 * NAND write with ECC.
2309 */ 2328 */
2310static int nand_write(struct mtd_info *mtd, loff_t to, size_t len, 2329static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2311 size_t *retlen, const uint8_t *buf) 2330 size_t *retlen, const uint8_t *buf)
2312{ 2331{
2313 struct nand_chip *chip = mtd->priv; 2332 struct nand_chip *chip = mtd->priv;
2333 struct mtd_oob_ops ops;
2314 int ret; 2334 int ret;
2315 2335
2316 /* Do not allow reads past end of device */ 2336 /* Do not allow reads past end of device */
@@ -2321,13 +2341,14 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2321 2341
2322 nand_get_device(chip, mtd, FL_WRITING); 2342 nand_get_device(chip, mtd, FL_WRITING);
2323 2343
2324 chip->ops.len = len; 2344 ops.len = len;
2325 chip->ops.datbuf = (uint8_t *)buf; 2345 ops.datbuf = (uint8_t *)buf;
2326 chip->ops.oobbuf = NULL; 2346 ops.oobbuf = NULL;
2347 ops.mode = 0;
2327 2348
2328 ret = nand_do_write_ops(mtd, to, &chip->ops); 2349 ret = nand_do_write_ops(mtd, to, &ops);
2329 2350
2330 *retlen = chip->ops.retlen; 2351 *retlen = ops.retlen;
2331 2352
2332 nand_release_device(mtd); 2353 nand_release_device(mtd);
2333 2354
@@ -2336,11 +2357,11 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2336 2357
2337/** 2358/**
2338 * nand_do_write_oob - [MTD Interface] NAND write out-of-band 2359 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
2339 * @mtd: MTD device structure 2360 * @mtd: MTD device structure
2340 * @to: offset to write to 2361 * @to: offset to write to
2341 * @ops: oob operation description structure 2362 * @ops: oob operation description structure
2342 * 2363 *
2343 * NAND write out-of-band 2364 * NAND write out-of-band.
2344 */ 2365 */
2345static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, 2366static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2346 struct mtd_oob_ops *ops) 2367 struct mtd_oob_ops *ops)
@@ -2348,24 +2369,24 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2348 int chipnr, page, status, len; 2369 int chipnr, page, status, len;
2349 struct nand_chip *chip = mtd->priv; 2370 struct nand_chip *chip = mtd->priv;
2350 2371
2351 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", 2372 pr_debug("%s: to = 0x%08x, len = %i\n",
2352 __func__, (unsigned int)to, (int)ops->ooblen); 2373 __func__, (unsigned int)to, (int)ops->ooblen);
2353 2374
2354 if (ops->mode == MTD_OOB_AUTO) 2375 if (ops->mode == MTD_OPS_AUTO_OOB)
2355 len = chip->ecc.layout->oobavail; 2376 len = chip->ecc.layout->oobavail;
2356 else 2377 else
2357 len = mtd->oobsize; 2378 len = mtd->oobsize;
2358 2379
2359 /* Do not allow write past end of page */ 2380 /* Do not allow write past end of page */
2360 if ((ops->ooboffs + ops->ooblen) > len) { 2381 if ((ops->ooboffs + ops->ooblen) > len) {
2361 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write " 2382 pr_debug("%s: attempt to write past end of page\n",
2362 "past end of page\n", __func__); 2383 __func__);
2363 return -EINVAL; 2384 return -EINVAL;
2364 } 2385 }
2365 2386
2366 if (unlikely(ops->ooboffs >= len)) { 2387 if (unlikely(ops->ooboffs >= len)) {
2367 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start " 2388 pr_debug("%s: attempt to start write outside oob\n",
2368 "write outside oob\n", __func__); 2389 __func__);
2369 return -EINVAL; 2390 return -EINVAL;
2370 } 2391 }
2371 2392
@@ -2374,8 +2395,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2374 ops->ooboffs + ops->ooblen > 2395 ops->ooboffs + ops->ooblen >
2375 ((mtd->size >> chip->page_shift) - 2396 ((mtd->size >> chip->page_shift) -
2376 (to >> chip->page_shift)) * len)) { 2397 (to >> chip->page_shift)) * len)) {
2377 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond " 2398 pr_debug("%s: attempt to write beyond end of device\n",
2378 "end of device\n", __func__); 2399 __func__);
2379 return -EINVAL; 2400 return -EINVAL;
2380 } 2401 }
2381 2402
@@ -2401,10 +2422,12 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2401 if (page == chip->pagebuf) 2422 if (page == chip->pagebuf)
2402 chip->pagebuf = -1; 2423 chip->pagebuf = -1;
2403 2424
2404 memset(chip->oob_poi, 0xff, mtd->oobsize); 2425 nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
2405 nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops); 2426
2406 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); 2427 if (ops->mode == MTD_OPS_RAW)
2407 memset(chip->oob_poi, 0xff, mtd->oobsize); 2428 status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
2429 else
2430 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
2408 2431
2409 if (status) 2432 if (status)
2410 return status; 2433 return status;
@@ -2416,9 +2439,9 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2416 2439
2417/** 2440/**
2418 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band 2441 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
2419 * @mtd: MTD device structure 2442 * @mtd: MTD device structure
2420 * @to: offset to write to 2443 * @to: offset to write to
2421 * @ops: oob operation description structure 2444 * @ops: oob operation description structure
2422 */ 2445 */
2423static int nand_write_oob(struct mtd_info *mtd, loff_t to, 2446static int nand_write_oob(struct mtd_info *mtd, loff_t to,
2424 struct mtd_oob_ops *ops) 2447 struct mtd_oob_ops *ops)
@@ -2430,17 +2453,17 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
2430 2453
2431 /* Do not allow writes past end of device */ 2454 /* Do not allow writes past end of device */
2432 if (ops->datbuf && (to + ops->len) > mtd->size) { 2455 if (ops->datbuf && (to + ops->len) > mtd->size) {
2433 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond " 2456 pr_debug("%s: attempt to write beyond end of device\n",
2434 "end of device\n", __func__); 2457 __func__);
2435 return -EINVAL; 2458 return -EINVAL;
2436 } 2459 }
2437 2460
2438 nand_get_device(chip, mtd, FL_WRITING); 2461 nand_get_device(chip, mtd, FL_WRITING);
2439 2462
2440 switch (ops->mode) { 2463 switch (ops->mode) {
2441 case MTD_OOB_PLACE: 2464 case MTD_OPS_PLACE_OOB:
2442 case MTD_OOB_AUTO: 2465 case MTD_OPS_AUTO_OOB:
2443 case MTD_OOB_RAW: 2466 case MTD_OPS_RAW:
2444 break; 2467 break;
2445 2468
2446 default: 2469 default:
@@ -2458,11 +2481,11 @@ out:
2458} 2481}
2459 2482
2460/** 2483/**
2461 * single_erease_cmd - [GENERIC] NAND standard block erase command function 2484 * single_erase_cmd - [GENERIC] NAND standard block erase command function
2462 * @mtd: MTD device structure 2485 * @mtd: MTD device structure
2463 * @page: the page address of the block which will be erased 2486 * @page: the page address of the block which will be erased
2464 * 2487 *
2465 * Standard erase command for NAND chips 2488 * Standard erase command for NAND chips.
2466 */ 2489 */
2467static void single_erase_cmd(struct mtd_info *mtd, int page) 2490static void single_erase_cmd(struct mtd_info *mtd, int page)
2468{ 2491{
@@ -2473,12 +2496,11 @@ static void single_erase_cmd(struct mtd_info *mtd, int page)
2473} 2496}
2474 2497
2475/** 2498/**
2476 * multi_erease_cmd - [GENERIC] AND specific block erase command function 2499 * multi_erase_cmd - [GENERIC] AND specific block erase command function
2477 * @mtd: MTD device structure 2500 * @mtd: MTD device structure
2478 * @page: the page address of the block which will be erased 2501 * @page: the page address of the block which will be erased
2479 * 2502 *
2480 * AND multi block erase command function 2503 * AND multi block erase command function. Erase 4 consecutive blocks.
2481 * Erase 4 consecutive blocks
2482 */ 2504 */
2483static void multi_erase_cmd(struct mtd_info *mtd, int page) 2505static void multi_erase_cmd(struct mtd_info *mtd, int page)
2484{ 2506{
@@ -2493,10 +2515,10 @@ static void multi_erase_cmd(struct mtd_info *mtd, int page)
2493 2515
2494/** 2516/**
2495 * nand_erase - [MTD Interface] erase block(s) 2517 * nand_erase - [MTD Interface] erase block(s)
2496 * @mtd: MTD device structure 2518 * @mtd: MTD device structure
2497 * @instr: erase instruction 2519 * @instr: erase instruction
2498 * 2520 *
2499 * Erase one ore more blocks 2521 * Erase one ore more blocks.
2500 */ 2522 */
2501static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) 2523static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
2502{ 2524{
@@ -2505,12 +2527,12 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
2505 2527
2506#define BBT_PAGE_MASK 0xffffff3f 2528#define BBT_PAGE_MASK 0xffffff3f
2507/** 2529/**
2508 * nand_erase_nand - [Internal] erase block(s) 2530 * nand_erase_nand - [INTERN] erase block(s)
2509 * @mtd: MTD device structure 2531 * @mtd: MTD device structure
2510 * @instr: erase instruction 2532 * @instr: erase instruction
2511 * @allowbbt: allow erasing the bbt area 2533 * @allowbbt: allow erasing the bbt area
2512 * 2534 *
2513 * Erase one ore more blocks 2535 * Erase one ore more blocks.
2514 */ 2536 */
2515int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, 2537int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2516 int allowbbt) 2538 int allowbbt)
@@ -2521,9 +2543,9 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2521 unsigned int bbt_masked_page = 0xffffffff; 2543 unsigned int bbt_masked_page = 0xffffffff;
2522 loff_t len; 2544 loff_t len;
2523 2545
2524 DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", 2546 pr_debug("%s: start = 0x%012llx, len = %llu\n",
2525 __func__, (unsigned long long)instr->addr, 2547 __func__, (unsigned long long)instr->addr,
2526 (unsigned long long)instr->len); 2548 (unsigned long long)instr->len);
2527 2549
2528 if (check_offs_len(mtd, instr->addr, instr->len)) 2550 if (check_offs_len(mtd, instr->addr, instr->len))
2529 return -EINVAL; 2551 return -EINVAL;
@@ -2545,8 +2567,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2545 2567
2546 /* Check, if it is write protected */ 2568 /* Check, if it is write protected */
2547 if (nand_check_wp(mtd)) { 2569 if (nand_check_wp(mtd)) {
2548 DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", 2570 pr_debug("%s: device is write protected!\n",
2549 __func__); 2571 __func__);
2550 instr->state = MTD_ERASE_FAILED; 2572 instr->state = MTD_ERASE_FAILED;
2551 goto erase_exit; 2573 goto erase_exit;
2552 } 2574 }
@@ -2555,7 +2577,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2555 * If BBT requires refresh, set the BBT page mask to see if the BBT 2577 * If BBT requires refresh, set the BBT page mask to see if the BBT
2556 * should be rewritten. Otherwise the mask is set to 0xffffffff which 2578 * should be rewritten. Otherwise the mask is set to 0xffffffff which
2557 * can not be matched. This is also done when the bbt is actually 2579 * can not be matched. This is also done when the bbt is actually
2558 * erased to avoid recusrsive updates 2580 * erased to avoid recursive updates.
2559 */ 2581 */
2560 if (chip->options & BBT_AUTO_REFRESH && !allowbbt) 2582 if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
2561 bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK; 2583 bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
@@ -2566,20 +2588,18 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2566 instr->state = MTD_ERASING; 2588 instr->state = MTD_ERASING;
2567 2589
2568 while (len) { 2590 while (len) {
2569 /* 2591 /* Heck if we have a bad block, we do not erase bad blocks! */
2570 * heck if we have a bad block, we do not erase bad blocks !
2571 */
2572 if (nand_block_checkbad(mtd, ((loff_t) page) << 2592 if (nand_block_checkbad(mtd, ((loff_t) page) <<
2573 chip->page_shift, 0, allowbbt)) { 2593 chip->page_shift, 0, allowbbt)) {
2574 printk(KERN_WARNING "%s: attempt to erase a bad block " 2594 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
2575 "at page 0x%08x\n", __func__, page); 2595 __func__, page);
2576 instr->state = MTD_ERASE_FAILED; 2596 instr->state = MTD_ERASE_FAILED;
2577 goto erase_exit; 2597 goto erase_exit;
2578 } 2598 }
2579 2599
2580 /* 2600 /*
2581 * Invalidate the page cache, if we erase the block which 2601 * Invalidate the page cache, if we erase the block which
2582 * contains the current cached page 2602 * contains the current cached page.
2583 */ 2603 */
2584 if (page <= chip->pagebuf && chip->pagebuf < 2604 if (page <= chip->pagebuf && chip->pagebuf <
2585 (page + pages_per_block)) 2605 (page + pages_per_block))
@@ -2599,8 +2619,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2599 2619
2600 /* See if block erase succeeded */ 2620 /* See if block erase succeeded */
2601 if (status & NAND_STATUS_FAIL) { 2621 if (status & NAND_STATUS_FAIL) {
2602 DEBUG(MTD_DEBUG_LEVEL0, "%s: Failed erase, " 2622 pr_debug("%s: failed erase, page 0x%08x\n",
2603 "page 0x%08x\n", __func__, page); 2623 __func__, page);
2604 instr->state = MTD_ERASE_FAILED; 2624 instr->state = MTD_ERASE_FAILED;
2605 instr->fail_addr = 2625 instr->fail_addr =
2606 ((loff_t)page << chip->page_shift); 2626 ((loff_t)page << chip->page_shift);
@@ -2609,7 +2629,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2609 2629
2610 /* 2630 /*
2611 * If BBT requires refresh, set the BBT rewrite flag to the 2631 * If BBT requires refresh, set the BBT rewrite flag to the
2612 * page being erased 2632 * page being erased.
2613 */ 2633 */
2614 if (bbt_masked_page != 0xffffffff && 2634 if (bbt_masked_page != 0xffffffff &&
2615 (page & BBT_PAGE_MASK) == bbt_masked_page) 2635 (page & BBT_PAGE_MASK) == bbt_masked_page)
@@ -2628,7 +2648,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2628 2648
2629 /* 2649 /*
2630 * If BBT requires refresh and BBT-PERCHIP, set the BBT 2650 * If BBT requires refresh and BBT-PERCHIP, set the BBT
2631 * page mask to see if this BBT should be rewritten 2651 * page mask to see if this BBT should be rewritten.
2632 */ 2652 */
2633 if (bbt_masked_page != 0xffffffff && 2653 if (bbt_masked_page != 0xffffffff &&
2634 (chip->bbt_td->options & NAND_BBT_PERCHIP)) 2654 (chip->bbt_td->options & NAND_BBT_PERCHIP))
@@ -2651,7 +2671,7 @@ erase_exit:
2651 2671
2652 /* 2672 /*
2653 * If BBT requires refresh and erase was successful, rewrite any 2673 * If BBT requires refresh and erase was successful, rewrite any
2654 * selected bad block tables 2674 * selected bad block tables.
2655 */ 2675 */
2656 if (bbt_masked_page == 0xffffffff || ret) 2676 if (bbt_masked_page == 0xffffffff || ret)
2657 return ret; 2677 return ret;
@@ -2659,10 +2679,10 @@ erase_exit:
2659 for (chipnr = 0; chipnr < chip->numchips; chipnr++) { 2679 for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
2660 if (!rewrite_bbt[chipnr]) 2680 if (!rewrite_bbt[chipnr])
2661 continue; 2681 continue;
2662 /* update the BBT for chip */ 2682 /* Update the BBT for chip */
2663 DEBUG(MTD_DEBUG_LEVEL0, "%s: nand_update_bbt " 2683 pr_debug("%s: nand_update_bbt (%d:0x%0llx 0x%0x)\n",
2664 "(%d:0x%0llx 0x%0x)\n", __func__, chipnr, 2684 __func__, chipnr, rewrite_bbt[chipnr],
2665 rewrite_bbt[chipnr], chip->bbt_td->pages[chipnr]); 2685 chip->bbt_td->pages[chipnr]);
2666 nand_update_bbt(mtd, rewrite_bbt[chipnr]); 2686 nand_update_bbt(mtd, rewrite_bbt[chipnr]);
2667 } 2687 }
2668 2688
@@ -2672,15 +2692,15 @@ erase_exit:
2672 2692
2673/** 2693/**
2674 * nand_sync - [MTD Interface] sync 2694 * nand_sync - [MTD Interface] sync
2675 * @mtd: MTD device structure 2695 * @mtd: MTD device structure
2676 * 2696 *
2677 * Sync is actually a wait for chip ready function 2697 * Sync is actually a wait for chip ready function.
2678 */ 2698 */
2679static void nand_sync(struct mtd_info *mtd) 2699static void nand_sync(struct mtd_info *mtd)
2680{ 2700{
2681 struct nand_chip *chip = mtd->priv; 2701 struct nand_chip *chip = mtd->priv;
2682 2702
2683 DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__); 2703 pr_debug("%s: called\n", __func__);
2684 2704
2685 /* Grab the lock and see if the device is available */ 2705 /* Grab the lock and see if the device is available */
2686 nand_get_device(chip, mtd, FL_SYNCING); 2706 nand_get_device(chip, mtd, FL_SYNCING);
@@ -2690,8 +2710,8 @@ static void nand_sync(struct mtd_info *mtd)
2690 2710
2691/** 2711/**
2692 * nand_block_isbad - [MTD Interface] Check if block at offset is bad 2712 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
2693 * @mtd: MTD device structure 2713 * @mtd: MTD device structure
2694 * @offs: offset relative to mtd start 2714 * @offs: offset relative to mtd start
2695 */ 2715 */
2696static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) 2716static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
2697{ 2717{
@@ -2704,8 +2724,8 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
2704 2724
2705/** 2725/**
2706 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad 2726 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
2707 * @mtd: MTD device structure 2727 * @mtd: MTD device structure
2708 * @ofs: offset relative to mtd start 2728 * @ofs: offset relative to mtd start
2709 */ 2729 */
2710static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs) 2730static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2711{ 2731{
@@ -2714,7 +2734,7 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2714 2734
2715 ret = nand_block_isbad(mtd, ofs); 2735 ret = nand_block_isbad(mtd, ofs);
2716 if (ret) { 2736 if (ret) {
2717 /* If it was bad already, return success and do nothing. */ 2737 /* If it was bad already, return success and do nothing */
2718 if (ret > 0) 2738 if (ret > 0)
2719 return 0; 2739 return 0;
2720 return ret; 2740 return ret;
@@ -2725,7 +2745,7 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2725 2745
2726/** 2746/**
2727 * nand_suspend - [MTD Interface] Suspend the NAND flash 2747 * nand_suspend - [MTD Interface] Suspend the NAND flash
2728 * @mtd: MTD device structure 2748 * @mtd: MTD device structure
2729 */ 2749 */
2730static int nand_suspend(struct mtd_info *mtd) 2750static int nand_suspend(struct mtd_info *mtd)
2731{ 2751{
@@ -2736,7 +2756,7 @@ static int nand_suspend(struct mtd_info *mtd)
2736 2756
2737/** 2757/**
2738 * nand_resume - [MTD Interface] Resume the NAND flash 2758 * nand_resume - [MTD Interface] Resume the NAND flash
2739 * @mtd: MTD device structure 2759 * @mtd: MTD device structure
2740 */ 2760 */
2741static void nand_resume(struct mtd_info *mtd) 2761static void nand_resume(struct mtd_info *mtd)
2742{ 2762{
@@ -2745,13 +2765,11 @@ static void nand_resume(struct mtd_info *mtd)
2745 if (chip->state == FL_PM_SUSPENDED) 2765 if (chip->state == FL_PM_SUSPENDED)
2746 nand_release_device(mtd); 2766 nand_release_device(mtd);
2747 else 2767 else
2748 printk(KERN_ERR "%s called for a chip which is not " 2768 pr_err("%s called for a chip which is not in suspended state\n",
2749 "in suspended state\n", __func__); 2769 __func__);
2750} 2770}
2751 2771
2752/* 2772/* Set default functions */
2753 * Set default functions
2754 */
2755static void nand_set_defaults(struct nand_chip *chip, int busw) 2773static void nand_set_defaults(struct nand_chip *chip, int busw)
2756{ 2774{
2757 /* check for proper chip_delay setup, set 20us if not */ 2775 /* check for proper chip_delay setup, set 20us if not */
@@ -2793,23 +2811,21 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
2793 2811
2794} 2812}
2795 2813
2796/* 2814/* Sanitize ONFI strings so we can safely print them */
2797 * sanitize ONFI strings so we can safely print them
2798 */
2799static void sanitize_string(uint8_t *s, size_t len) 2815static void sanitize_string(uint8_t *s, size_t len)
2800{ 2816{
2801 ssize_t i; 2817 ssize_t i;
2802 2818
2803 /* null terminate */ 2819 /* Null terminate */
2804 s[len - 1] = 0; 2820 s[len - 1] = 0;
2805 2821
2806 /* remove non printable chars */ 2822 /* Remove non printable chars */
2807 for (i = 0; i < len - 1; i++) { 2823 for (i = 0; i < len - 1; i++) {
2808 if (s[i] < ' ' || s[i] > 127) 2824 if (s[i] < ' ' || s[i] > 127)
2809 s[i] = '?'; 2825 s[i] = '?';
2810 } 2826 }
2811 2827
2812 /* remove trailing spaces */ 2828 /* Remove trailing spaces */
2813 strim(s); 2829 strim(s);
2814} 2830}
2815 2831
@@ -2826,28 +2842,28 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
2826} 2842}
2827 2843
2828/* 2844/*
2829 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise 2845 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
2830 */ 2846 */
2831static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, 2847static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2832 int busw) 2848 int *busw)
2833{ 2849{
2834 struct nand_onfi_params *p = &chip->onfi_params; 2850 struct nand_onfi_params *p = &chip->onfi_params;
2835 int i; 2851 int i;
2836 int val; 2852 int val;
2837 2853
2838 /* try ONFI for unknow chip or LP */ 2854 /* Try ONFI for unknown chip or LP */
2839 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1); 2855 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
2840 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || 2856 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
2841 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I') 2857 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
2842 return 0; 2858 return 0;
2843 2859
2844 printk(KERN_INFO "ONFI flash detected\n"); 2860 pr_info("ONFI flash detected\n");
2845 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); 2861 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
2846 for (i = 0; i < 3; i++) { 2862 for (i = 0; i < 3; i++) {
2847 chip->read_buf(mtd, (uint8_t *)p, sizeof(*p)); 2863 chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
2848 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) == 2864 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
2849 le16_to_cpu(p->crc)) { 2865 le16_to_cpu(p->crc)) {
2850 printk(KERN_INFO "ONFI param page %d valid\n", i); 2866 pr_info("ONFI param page %d valid\n", i);
2851 break; 2867 break;
2852 } 2868 }
2853 } 2869 }
@@ -2855,7 +2871,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2855 if (i == 3) 2871 if (i == 3)
2856 return 0; 2872 return 0;
2857 2873
2858 /* check version */ 2874 /* Check version */
2859 val = le16_to_cpu(p->revision); 2875 val = le16_to_cpu(p->revision);
2860 if (val & (1 << 5)) 2876 if (val & (1 << 5))
2861 chip->onfi_version = 23; 2877 chip->onfi_version = 23;
@@ -2871,8 +2887,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2871 chip->onfi_version = 0; 2887 chip->onfi_version = 0;
2872 2888
2873 if (!chip->onfi_version) { 2889 if (!chip->onfi_version) {
2874 printk(KERN_INFO "%s: unsupported ONFI version: %d\n", 2890 pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
2875 __func__, val);
2876 return 0; 2891 return 0;
2877 } 2892 }
2878 2893
@@ -2884,9 +2899,9 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2884 mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize; 2899 mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
2885 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page); 2900 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
2886 chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize; 2901 chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
2887 busw = 0; 2902 *busw = 0;
2888 if (le16_to_cpu(p->features) & 1) 2903 if (le16_to_cpu(p->features) & 1)
2889 busw = NAND_BUSWIDTH_16; 2904 *busw = NAND_BUSWIDTH_16;
2890 2905
2891 chip->options &= ~NAND_CHIPOPTIONS_MSK; 2906 chip->options &= ~NAND_CHIPOPTIONS_MSK;
2892 chip->options |= (NAND_NO_READRDY | 2907 chip->options |= (NAND_NO_READRDY |
@@ -2896,7 +2911,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2896} 2911}
2897 2912
2898/* 2913/*
2899 * Get the flash and manufacturer id and lookup if the type is supported 2914 * Get the flash and manufacturer id and lookup if the type is supported.
2900 */ 2915 */
2901static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, 2916static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2902 struct nand_chip *chip, 2917 struct nand_chip *chip,
@@ -2913,7 +2928,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2913 2928
2914 /* 2929 /*
2915 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) 2930 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
2916 * after power-up 2931 * after power-up.
2917 */ 2932 */
2918 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 2933 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
2919 2934
@@ -2924,7 +2939,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2924 *maf_id = chip->read_byte(mtd); 2939 *maf_id = chip->read_byte(mtd);
2925 *dev_id = chip->read_byte(mtd); 2940 *dev_id = chip->read_byte(mtd);
2926 2941
2927 /* Try again to make sure, as some systems the bus-hold or other 2942 /*
2943 * Try again to make sure, as some systems the bus-hold or other
2928 * interface concerns can cause random data which looks like a 2944 * interface concerns can cause random data which looks like a
2929 * possibly credible NAND flash to appear. If the two results do 2945 * possibly credible NAND flash to appear. If the two results do
2930 * not match, ignore the device completely. 2946 * not match, ignore the device completely.
@@ -2936,9 +2952,9 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2936 id_data[i] = chip->read_byte(mtd); 2952 id_data[i] = chip->read_byte(mtd);
2937 2953
2938 if (id_data[0] != *maf_id || id_data[1] != *dev_id) { 2954 if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
2939 printk(KERN_INFO "%s: second ID read did not match " 2955 pr_info("%s: second ID read did not match "
2940 "%02x,%02x against %02x,%02x\n", __func__, 2956 "%02x,%02x against %02x,%02x\n", __func__,
2941 *maf_id, *dev_id, id_data[0], id_data[1]); 2957 *maf_id, *dev_id, id_data[0], id_data[1]);
2942 return ERR_PTR(-ENODEV); 2958 return ERR_PTR(-ENODEV);
2943 } 2959 }
2944 2960
@@ -2952,7 +2968,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2952 chip->onfi_version = 0; 2968 chip->onfi_version = 0;
2953 if (!type->name || !type->pagesize) { 2969 if (!type->name || !type->pagesize) {
2954 /* Check is chip is ONFI compliant */ 2970 /* Check is chip is ONFI compliant */
2955 ret = nand_flash_detect_onfi(mtd, chip, busw); 2971 ret = nand_flash_detect_onfi(mtd, chip, &busw);
2956 if (ret) 2972 if (ret)
2957 goto ident_done; 2973 goto ident_done;
2958 } 2974 }
@@ -2973,7 +2989,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2973 chip->chipsize = (uint64_t)type->chipsize << 20; 2989 chip->chipsize = (uint64_t)type->chipsize << 20;
2974 2990
2975 if (!type->pagesize && chip->init_size) { 2991 if (!type->pagesize && chip->init_size) {
2976 /* set the pagesize, oobsize, erasesize by the driver*/ 2992 /* Set the pagesize, oobsize, erasesize by the driver */
2977 busw = chip->init_size(mtd, chip, id_data); 2993 busw = chip->init_size(mtd, chip, id_data);
2978 } else if (!type->pagesize) { 2994 } else if (!type->pagesize) {
2979 int extid; 2995 int extid;
@@ -3033,7 +3049,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3033 } 3049 }
3034 } else { 3050 } else {
3035 /* 3051 /*
3036 * Old devices have chip data hardcoded in the device id table 3052 * Old devices have chip data hardcoded in the device id table.
3037 */ 3053 */
3038 mtd->erasesize = type->erasesize; 3054 mtd->erasesize = type->erasesize;
3039 mtd->writesize = type->pagesize; 3055 mtd->writesize = type->pagesize;
@@ -3043,7 +3059,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3043 /* 3059 /*
3044 * Check for Spansion/AMD ID + repeating 5th, 6th byte since 3060 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
3045 * some Spansion chips have erasesize that conflicts with size 3061 * some Spansion chips have erasesize that conflicts with size
3046 * listed in nand_ids table 3062 * listed in nand_ids table.
3047 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39) 3063 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
3048 */ 3064 */
3049 if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && 3065 if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 &&
@@ -3057,15 +3073,16 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3057 chip->options &= ~NAND_CHIPOPTIONS_MSK; 3073 chip->options &= ~NAND_CHIPOPTIONS_MSK;
3058 chip->options |= type->options & NAND_CHIPOPTIONS_MSK; 3074 chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
3059 3075
3060 /* Check if chip is a not a samsung device. Do not clear the 3076 /*
3061 * options for chips which are not having an extended id. 3077 * Check if chip is not a Samsung device. Do not clear the
3078 * options for chips which do not have an extended id.
3062 */ 3079 */
3063 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize) 3080 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
3064 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; 3081 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
3065ident_done: 3082ident_done:
3066 3083
3067 /* 3084 /*
3068 * Set chip as a default. Board drivers can override it, if necessary 3085 * Set chip as a default. Board drivers can override it, if necessary.
3069 */ 3086 */
3070 chip->options |= NAND_NO_AUTOINCR; 3087 chip->options |= NAND_NO_AUTOINCR;
3071 3088
@@ -3077,21 +3094,21 @@ ident_done:
3077 3094
3078 /* 3095 /*
3079 * Check, if buswidth is correct. Hardware drivers should set 3096 * Check, if buswidth is correct. Hardware drivers should set
3080 * chip correct ! 3097 * chip correct!
3081 */ 3098 */
3082 if (busw != (chip->options & NAND_BUSWIDTH_16)) { 3099 if (busw != (chip->options & NAND_BUSWIDTH_16)) {
3083 printk(KERN_INFO "NAND device: Manufacturer ID:" 3100 pr_info("NAND device: Manufacturer ID:"
3084 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, 3101 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
3085 *dev_id, nand_manuf_ids[maf_idx].name, mtd->name); 3102 *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
3086 printk(KERN_WARNING "NAND bus width %d instead %d bit\n", 3103 pr_warn("NAND bus width %d instead %d bit\n",
3087 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8, 3104 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
3088 busw ? 16 : 8); 3105 busw ? 16 : 8);
3089 return ERR_PTR(-EINVAL); 3106 return ERR_PTR(-EINVAL);
3090 } 3107 }
3091 3108
3092 /* Calculate the address shift from the page size */ 3109 /* Calculate the address shift from the page size */
3093 chip->page_shift = ffs(mtd->writesize) - 1; 3110 chip->page_shift = ffs(mtd->writesize) - 1;
3094 /* Convert chipsize to number of pages per chip -1. */ 3111 /* Convert chipsize to number of pages per chip -1 */
3095 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1; 3112 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
3096 3113
3097 chip->bbt_erase_shift = chip->phys_erase_shift = 3114 chip->bbt_erase_shift = chip->phys_erase_shift =
@@ -3121,7 +3138,7 @@ ident_done:
3121 if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && 3138 if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3122 (*maf_id == NAND_MFR_SAMSUNG || 3139 (*maf_id == NAND_MFR_SAMSUNG ||
3123 *maf_id == NAND_MFR_HYNIX)) 3140 *maf_id == NAND_MFR_HYNIX))
3124 chip->options |= NAND_BBT_SCANLASTPAGE; 3141 chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
3125 else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) && 3142 else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3126 (*maf_id == NAND_MFR_SAMSUNG || 3143 (*maf_id == NAND_MFR_SAMSUNG ||
3127 *maf_id == NAND_MFR_HYNIX || 3144 *maf_id == NAND_MFR_HYNIX ||
@@ -3129,17 +3146,7 @@ ident_done:
3129 *maf_id == NAND_MFR_AMD)) || 3146 *maf_id == NAND_MFR_AMD)) ||
3130 (mtd->writesize == 2048 && 3147 (mtd->writesize == 2048 &&
3131 *maf_id == NAND_MFR_MICRON)) 3148 *maf_id == NAND_MFR_MICRON))
3132 chip->options |= NAND_BBT_SCAN2NDPAGE; 3149 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
3133
3134 /*
3135 * Numonyx/ST 2K pages, x8 bus use BOTH byte 1 and 6
3136 */
3137 if (!(busw & NAND_BUSWIDTH_16) &&
3138 *maf_id == NAND_MFR_STMICRO &&
3139 mtd->writesize == 2048) {
3140 chip->options |= NAND_BBT_SCANBYTE1AND6;
3141 chip->badblockpos = 0;
3142 }
3143 3150
3144 /* Check for AND chips with 4 page planes */ 3151 /* Check for AND chips with 4 page planes */
3145 if (chip->options & NAND_4PAGE_ARRAY) 3152 if (chip->options & NAND_4PAGE_ARRAY)
@@ -3147,12 +3154,11 @@ ident_done:
3147 else 3154 else
3148 chip->erase_cmd = single_erase_cmd; 3155 chip->erase_cmd = single_erase_cmd;
3149 3156
3150 /* Do not replace user supplied command function ! */ 3157 /* Do not replace user supplied command function! */
3151 if (mtd->writesize > 512 && chip->cmdfunc == nand_command) 3158 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3152 chip->cmdfunc = nand_command_lp; 3159 chip->cmdfunc = nand_command_lp;
3153 3160
3154 /* TODO onfi flash name */ 3161 pr_info("NAND device: Manufacturer ID:"
3155 printk(KERN_INFO "NAND device: Manufacturer ID:"
3156 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id, 3162 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
3157 nand_manuf_ids[maf_idx].name, 3163 nand_manuf_ids[maf_idx].name,
3158 chip->onfi_version ? chip->onfi_params.model : type->name); 3164 chip->onfi_version ? chip->onfi_params.model : type->name);
@@ -3162,12 +3168,12 @@ ident_done:
3162 3168
3163/** 3169/**
3164 * nand_scan_ident - [NAND Interface] Scan for the NAND device 3170 * nand_scan_ident - [NAND Interface] Scan for the NAND device
3165 * @mtd: MTD device structure 3171 * @mtd: MTD device structure
3166 * @maxchips: Number of chips to scan for 3172 * @maxchips: number of chips to scan for
3167 * @table: Alternative NAND ID table 3173 * @table: alternative NAND ID table
3168 * 3174 *
3169 * This is the first phase of the normal nand_scan() function. It 3175 * This is the first phase of the normal nand_scan() function. It reads the
3170 * reads the flash ID and sets up MTD fields accordingly. 3176 * flash ID and sets up MTD fields accordingly.
3171 * 3177 *
3172 * The mtd->owner field must be set to the module of the caller. 3178 * The mtd->owner field must be set to the module of the caller.
3173 */ 3179 */
@@ -3189,7 +3195,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3189 3195
3190 if (IS_ERR(type)) { 3196 if (IS_ERR(type)) {
3191 if (!(chip->options & NAND_SCAN_SILENT_NODEV)) 3197 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
3192 printk(KERN_WARNING "No NAND device found.\n"); 3198 pr_warn("No NAND device found\n");
3193 chip->select_chip(mtd, -1); 3199 chip->select_chip(mtd, -1);
3194 return PTR_ERR(type); 3200 return PTR_ERR(type);
3195 } 3201 }
@@ -3207,7 +3213,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3207 break; 3213 break;
3208 } 3214 }
3209 if (i > 1) 3215 if (i > 1)
3210 printk(KERN_INFO "%d NAND chips detected\n", i); 3216 pr_info("%d NAND chips detected\n", i);
3211 3217
3212 /* Store the number of chips and calc total size for mtd */ 3218 /* Store the number of chips and calc total size for mtd */
3213 chip->numchips = i; 3219 chip->numchips = i;
@@ -3220,11 +3226,11 @@ EXPORT_SYMBOL(nand_scan_ident);
3220 3226
3221/** 3227/**
3222 * nand_scan_tail - [NAND Interface] Scan for the NAND device 3228 * nand_scan_tail - [NAND Interface] Scan for the NAND device
3223 * @mtd: MTD device structure 3229 * @mtd: MTD device structure
3224 * 3230 *
3225 * This is the second phase of the normal nand_scan() function. It 3231 * This is the second phase of the normal nand_scan() function. It fills out
3226 * fills out all the uninitialized function pointers with the defaults 3232 * all the uninitialized function pointers with the defaults and scans for a
3227 * and scans for a bad block table if appropriate. 3233 * bad block table if appropriate.
3228 */ 3234 */
3229int nand_scan_tail(struct mtd_info *mtd) 3235int nand_scan_tail(struct mtd_info *mtd)
3230{ 3236{
@@ -3240,7 +3246,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3240 chip->oob_poi = chip->buffers->databuf + mtd->writesize; 3246 chip->oob_poi = chip->buffers->databuf + mtd->writesize;
3241 3247
3242 /* 3248 /*
3243 * If no default placement scheme is given, select an appropriate one 3249 * If no default placement scheme is given, select an appropriate one.
3244 */ 3250 */
3245 if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) { 3251 if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) {
3246 switch (mtd->oobsize) { 3252 switch (mtd->oobsize) {
@@ -3257,8 +3263,8 @@ int nand_scan_tail(struct mtd_info *mtd)
3257 chip->ecc.layout = &nand_oob_128; 3263 chip->ecc.layout = &nand_oob_128;
3258 break; 3264 break;
3259 default: 3265 default:
3260 printk(KERN_WARNING "No oob scheme defined for " 3266 pr_warn("No oob scheme defined for oobsize %d\n",
3261 "oobsize %d\n", mtd->oobsize); 3267 mtd->oobsize);
3262 BUG(); 3268 BUG();
3263 } 3269 }
3264 } 3270 }
@@ -3267,7 +3273,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3267 chip->write_page = nand_write_page; 3273 chip->write_page = nand_write_page;
3268 3274
3269 /* 3275 /*
3270 * check ECC mode, default to software if 3byte/512byte hardware ECC is 3276 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
3271 * selected and we have 256 byte pagesize fallback to software ECC 3277 * selected and we have 256 byte pagesize fallback to software ECC
3272 */ 3278 */
3273 3279
@@ -3276,15 +3282,15 @@ int nand_scan_tail(struct mtd_info *mtd)
3276 /* Similar to NAND_ECC_HW, but a separate read_page handle */ 3282 /* Similar to NAND_ECC_HW, but a separate read_page handle */
3277 if (!chip->ecc.calculate || !chip->ecc.correct || 3283 if (!chip->ecc.calculate || !chip->ecc.correct ||
3278 !chip->ecc.hwctl) { 3284 !chip->ecc.hwctl) {
3279 printk(KERN_WARNING "No ECC functions supplied; " 3285 pr_warn("No ECC functions supplied; "
3280 "Hardware ECC not possible\n"); 3286 "hardware ECC not possible\n");
3281 BUG(); 3287 BUG();
3282 } 3288 }
3283 if (!chip->ecc.read_page) 3289 if (!chip->ecc.read_page)
3284 chip->ecc.read_page = nand_read_page_hwecc_oob_first; 3290 chip->ecc.read_page = nand_read_page_hwecc_oob_first;
3285 3291
3286 case NAND_ECC_HW: 3292 case NAND_ECC_HW:
3287 /* Use standard hwecc read page function ? */ 3293 /* Use standard hwecc read page function? */
3288 if (!chip->ecc.read_page) 3294 if (!chip->ecc.read_page)
3289 chip->ecc.read_page = nand_read_page_hwecc; 3295 chip->ecc.read_page = nand_read_page_hwecc;
3290 if (!chip->ecc.write_page) 3296 if (!chip->ecc.write_page)
@@ -3305,11 +3311,11 @@ int nand_scan_tail(struct mtd_info *mtd)
3305 chip->ecc.read_page == nand_read_page_hwecc || 3311 chip->ecc.read_page == nand_read_page_hwecc ||
3306 !chip->ecc.write_page || 3312 !chip->ecc.write_page ||
3307 chip->ecc.write_page == nand_write_page_hwecc)) { 3313 chip->ecc.write_page == nand_write_page_hwecc)) {
3308 printk(KERN_WARNING "No ECC functions supplied; " 3314 pr_warn("No ECC functions supplied; "
3309 "Hardware ECC not possible\n"); 3315 "hardware ECC not possible\n");
3310 BUG(); 3316 BUG();
3311 } 3317 }
3312 /* Use standard syndrome read/write page function ? */ 3318 /* Use standard syndrome read/write page function? */
3313 if (!chip->ecc.read_page) 3319 if (!chip->ecc.read_page)
3314 chip->ecc.read_page = nand_read_page_syndrome; 3320 chip->ecc.read_page = nand_read_page_syndrome;
3315 if (!chip->ecc.write_page) 3321 if (!chip->ecc.write_page)
@@ -3325,9 +3331,9 @@ int nand_scan_tail(struct mtd_info *mtd)
3325 3331
3326 if (mtd->writesize >= chip->ecc.size) 3332 if (mtd->writesize >= chip->ecc.size)
3327 break; 3333 break;
3328 printk(KERN_WARNING "%d byte HW ECC not possible on " 3334 pr_warn("%d byte HW ECC not possible on "
3329 "%d byte page size, fallback to SW ECC\n", 3335 "%d byte page size, fallback to SW ECC\n",
3330 chip->ecc.size, mtd->writesize); 3336 chip->ecc.size, mtd->writesize);
3331 chip->ecc.mode = NAND_ECC_SOFT; 3337 chip->ecc.mode = NAND_ECC_SOFT;
3332 3338
3333 case NAND_ECC_SOFT: 3339 case NAND_ECC_SOFT:
@@ -3347,7 +3353,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3347 3353
3348 case NAND_ECC_SOFT_BCH: 3354 case NAND_ECC_SOFT_BCH:
3349 if (!mtd_nand_has_bch()) { 3355 if (!mtd_nand_has_bch()) {
3350 printk(KERN_WARNING "CONFIG_MTD_ECC_BCH not enabled\n"); 3356 pr_warn("CONFIG_MTD_ECC_BCH not enabled\n");
3351 BUG(); 3357 BUG();
3352 } 3358 }
3353 chip->ecc.calculate = nand_bch_calculate_ecc; 3359 chip->ecc.calculate = nand_bch_calculate_ecc;
@@ -3362,8 +3368,8 @@ int nand_scan_tail(struct mtd_info *mtd)
3362 /* 3368 /*
3363 * Board driver should supply ecc.size and ecc.bytes values to 3369 * Board driver should supply ecc.size and ecc.bytes values to
3364 * select how many bits are correctable; see nand_bch_init() 3370 * select how many bits are correctable; see nand_bch_init()
3365 * for details. 3371 * for details. Otherwise, default to 4 bits for large page
3366 * Otherwise, default to 4 bits for large page devices 3372 * devices.
3367 */ 3373 */
3368 if (!chip->ecc.size && (mtd->oobsize >= 64)) { 3374 if (!chip->ecc.size && (mtd->oobsize >= 64)) {
3369 chip->ecc.size = 512; 3375 chip->ecc.size = 512;
@@ -3374,14 +3380,14 @@ int nand_scan_tail(struct mtd_info *mtd)
3374 chip->ecc.bytes, 3380 chip->ecc.bytes,
3375 &chip->ecc.layout); 3381 &chip->ecc.layout);
3376 if (!chip->ecc.priv) { 3382 if (!chip->ecc.priv) {
3377 printk(KERN_WARNING "BCH ECC initialization failed!\n"); 3383 pr_warn("BCH ECC initialization failed!\n");
3378 BUG(); 3384 BUG();
3379 } 3385 }
3380 break; 3386 break;
3381 3387
3382 case NAND_ECC_NONE: 3388 case NAND_ECC_NONE:
3383 printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. " 3389 pr_warn("NAND_ECC_NONE selected by board driver. "
3384 "This is not recommended !!\n"); 3390 "This is not recommended!\n");
3385 chip->ecc.read_page = nand_read_page_raw; 3391 chip->ecc.read_page = nand_read_page_raw;
3386 chip->ecc.write_page = nand_write_page_raw; 3392 chip->ecc.write_page = nand_write_page_raw;
3387 chip->ecc.read_oob = nand_read_oob_std; 3393 chip->ecc.read_oob = nand_read_oob_std;
@@ -3393,14 +3399,19 @@ int nand_scan_tail(struct mtd_info *mtd)
3393 break; 3399 break;
3394 3400
3395 default: 3401 default:
3396 printk(KERN_WARNING "Invalid NAND_ECC_MODE %d\n", 3402 pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode);
3397 chip->ecc.mode);
3398 BUG(); 3403 BUG();
3399 } 3404 }
3400 3405
3406 /* For many systems, the standard OOB write also works for raw */
3407 if (!chip->ecc.read_oob_raw)
3408 chip->ecc.read_oob_raw = chip->ecc.read_oob;
3409 if (!chip->ecc.write_oob_raw)
3410 chip->ecc.write_oob_raw = chip->ecc.write_oob;
3411
3401 /* 3412 /*
3402 * The number of bytes available for a client to place data into 3413 * The number of bytes available for a client to place data into
3403 * the out of band area 3414 * the out of band area.
3404 */ 3415 */
3405 chip->ecc.layout->oobavail = 0; 3416 chip->ecc.layout->oobavail = 0;
3406 for (i = 0; chip->ecc.layout->oobfree[i].length 3417 for (i = 0; chip->ecc.layout->oobfree[i].length
@@ -3411,19 +3422,16 @@ int nand_scan_tail(struct mtd_info *mtd)
3411 3422
3412 /* 3423 /*
3413 * Set the number of read / write steps for one page depending on ECC 3424 * Set the number of read / write steps for one page depending on ECC
3414 * mode 3425 * mode.
3415 */ 3426 */
3416 chip->ecc.steps = mtd->writesize / chip->ecc.size; 3427 chip->ecc.steps = mtd->writesize / chip->ecc.size;
3417 if (chip->ecc.steps * chip->ecc.size != mtd->writesize) { 3428 if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
3418 printk(KERN_WARNING "Invalid ecc parameters\n"); 3429 pr_warn("Invalid ECC parameters\n");
3419 BUG(); 3430 BUG();
3420 } 3431 }
3421 chip->ecc.total = chip->ecc.steps * chip->ecc.bytes; 3432 chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
3422 3433
3423 /* 3434 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
3424 * Allow subpage writes up to ecc.steps. Not possible for MLC
3425 * FLASH.
3426 */
3427 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && 3435 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3428 !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) { 3436 !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
3429 switch (chip->ecc.steps) { 3437 switch (chip->ecc.steps) {
@@ -3481,9 +3489,11 @@ int nand_scan_tail(struct mtd_info *mtd)
3481} 3489}
3482EXPORT_SYMBOL(nand_scan_tail); 3490EXPORT_SYMBOL(nand_scan_tail);
3483 3491
3484/* is_module_text_address() isn't exported, and it's mostly a pointless 3492/*
3493 * is_module_text_address() isn't exported, and it's mostly a pointless
3485 * test if this is a module _anyway_ -- they'd have to try _really_ hard 3494 * test if this is a module _anyway_ -- they'd have to try _really_ hard
3486 * to call us from in-kernel code if the core NAND support is modular. */ 3495 * to call us from in-kernel code if the core NAND support is modular.
3496 */
3487#ifdef MODULE 3497#ifdef MODULE
3488#define caller_is_module() (1) 3498#define caller_is_module() (1)
3489#else 3499#else
@@ -3493,15 +3503,13 @@ EXPORT_SYMBOL(nand_scan_tail);
3493 3503
3494/** 3504/**
3495 * nand_scan - [NAND Interface] Scan for the NAND device 3505 * nand_scan - [NAND Interface] Scan for the NAND device
3496 * @mtd: MTD device structure 3506 * @mtd: MTD device structure
3497 * @maxchips: Number of chips to scan for 3507 * @maxchips: number of chips to scan for
3498 *
3499 * This fills out all the uninitialized function pointers
3500 * with the defaults.
3501 * The flash ID is read and the mtd/chip structures are
3502 * filled with the appropriate values.
3503 * The mtd->owner field must be set to the module of the caller
3504 * 3508 *
3509 * This fills out all the uninitialized function pointers with the defaults.
3510 * The flash ID is read and the mtd/chip structures are filled with the
3511 * appropriate values. The mtd->owner field must be set to the module of the
3512 * caller.
3505 */ 3513 */
3506int nand_scan(struct mtd_info *mtd, int maxchips) 3514int nand_scan(struct mtd_info *mtd, int maxchips)
3507{ 3515{
@@ -3509,8 +3517,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
3509 3517
3510 /* Many callers got this wrong, so check for it for a while... */ 3518 /* Many callers got this wrong, so check for it for a while... */
3511 if (!mtd->owner && caller_is_module()) { 3519 if (!mtd->owner && caller_is_module()) {
3512 printk(KERN_CRIT "%s called with NULL mtd->owner!\n", 3520 pr_crit("%s called with NULL mtd->owner!\n", __func__);
3513 __func__);
3514 BUG(); 3521 BUG();
3515 } 3522 }
3516 3523
@@ -3523,8 +3530,8 @@ EXPORT_SYMBOL(nand_scan);
3523 3530
3524/** 3531/**
3525 * nand_release - [NAND Interface] Free resources held by the NAND device 3532 * nand_release - [NAND Interface] Free resources held by the NAND device
3526 * @mtd: MTD device structure 3533 * @mtd: MTD device structure
3527*/ 3534 */
3528void nand_release(struct mtd_info *mtd) 3535void nand_release(struct mtd_info *mtd)
3529{ 3536{
3530 struct nand_chip *chip = mtd->priv; 3537 struct nand_chip *chip = mtd->priv;
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 4165857752ca..69148ae3bf58 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -14,7 +14,7 @@
14 * 14 *
15 * When nand_scan_bbt is called, then it tries to find the bad block table 15 * When nand_scan_bbt is called, then it tries to find the bad block table
16 * depending on the options in the BBT descriptor(s). If no flash based BBT 16 * depending on the options in the BBT descriptor(s). If no flash based BBT
17 * (NAND_USE_FLASH_BBT) is specified then the device is scanned for factory 17 * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory
18 * marked good / bad blocks. This information is used to create a memory BBT. 18 * marked good / bad blocks. This information is used to create a memory BBT.
19 * Once a new bad block is discovered then the "factory" information is updated 19 * Once a new bad block is discovered then the "factory" information is updated
20 * on the device. 20 * on the device.
@@ -36,9 +36,9 @@
36 * The table is marked in the OOB area with an ident pattern and a version 36 * The table is marked in the OOB area with an ident pattern and a version
37 * number which indicates which of both tables is more up to date. If the NAND 37 * number which indicates which of both tables is more up to date. If the NAND
38 * controller needs the complete OOB area for the ECC information then the 38 * controller needs the complete OOB area for the ECC information then the
39 * option NAND_USE_FLASH_BBT_NO_OOB should be used: it moves the ident pattern 39 * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
40 * and the version byte into the data area and the OOB area will remain 40 * course): it moves the ident pattern and the version byte into the data area
41 * untouched. 41 * and the OOB area will remain untouched.
42 * 42 *
43 * The table uses 2 bits per block 43 * The table uses 2 bits per block
44 * 11b: block is good 44 * 11b: block is good
@@ -81,17 +81,15 @@ static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
81 81
82/** 82/**
83 * check_pattern - [GENERIC] check if a pattern is in the buffer 83 * check_pattern - [GENERIC] check if a pattern is in the buffer
84 * @buf: the buffer to search 84 * @buf: the buffer to search
85 * @len: the length of buffer to search 85 * @len: the length of buffer to search
86 * @paglen: the pagelength 86 * @paglen: the pagelength
87 * @td: search pattern descriptor 87 * @td: search pattern descriptor
88 * 88 *
89 * Check for a pattern at the given place. Used to search bad block 89 * Check for a pattern at the given place. Used to search bad block tables and
90 * tables and good / bad block identifiers. 90 * good / bad block identifiers. If the SCAN_EMPTY option is set then check, if
91 * If the SCAN_EMPTY option is set then check, if all bytes except the 91 * all bytes except the pattern area contain 0xff.
92 * pattern area contain 0xff 92 */
93 *
94*/
95static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td) 93static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
96{ 94{
97 int i, end = 0; 95 int i, end = 0;
@@ -110,32 +108,8 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc
110 p += end; 108 p += end;
111 109
112 /* Compare the pattern */ 110 /* Compare the pattern */
113 for (i = 0; i < td->len; i++) { 111 if (memcmp(p, td->pattern, td->len))
114 if (p[i] != td->pattern[i]) 112 return -1;
115 return -1;
116 }
117
118 /* Check both positions 1 and 6 for pattern? */
119 if (td->options & NAND_BBT_SCANBYTE1AND6) {
120 if (td->options & NAND_BBT_SCANEMPTY) {
121 p += td->len;
122 end += NAND_SMALL_BADBLOCK_POS - td->offs;
123 /* Check region between positions 1 and 6 */
124 for (i = 0; i < NAND_SMALL_BADBLOCK_POS - td->offs - td->len;
125 i++) {
126 if (*p++ != 0xff)
127 return -1;
128 }
129 }
130 else {
131 p += NAND_SMALL_BADBLOCK_POS - td->offs;
132 }
133 /* Compare the pattern */
134 for (i = 0; i < td->len; i++) {
135 if (p[i] != td->pattern[i])
136 return -1;
137 }
138 }
139 113
140 if (td->options & NAND_BBT_SCANEMPTY) { 114 if (td->options & NAND_BBT_SCANEMPTY) {
141 p += td->len; 115 p += td->len;
@@ -150,14 +124,13 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc
150 124
151/** 125/**
152 * check_short_pattern - [GENERIC] check if a pattern is in the buffer 126 * check_short_pattern - [GENERIC] check if a pattern is in the buffer
153 * @buf: the buffer to search 127 * @buf: the buffer to search
154 * @td: search pattern descriptor 128 * @td: search pattern descriptor
155 *
156 * Check for a pattern at the given place. Used to search bad block
157 * tables and good / bad block identifiers. Same as check_pattern, but
158 * no optional empty check
159 * 129 *
160*/ 130 * Check for a pattern at the given place. Used to search bad block tables and
131 * good / bad block identifiers. Same as check_pattern, but no optional empty
132 * check.
133 */
161static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td) 134static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
162{ 135{
163 int i; 136 int i;
@@ -168,21 +141,14 @@ static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
168 if (p[td->offs + i] != td->pattern[i]) 141 if (p[td->offs + i] != td->pattern[i])
169 return -1; 142 return -1;
170 } 143 }
171 /* Need to check location 1 AND 6? */
172 if (td->options & NAND_BBT_SCANBYTE1AND6) {
173 for (i = 0; i < td->len; i++) {
174 if (p[NAND_SMALL_BADBLOCK_POS + i] != td->pattern[i])
175 return -1;
176 }
177 }
178 return 0; 144 return 0;
179} 145}
180 146
181/** 147/**
182 * add_marker_len - compute the length of the marker in data area 148 * add_marker_len - compute the length of the marker in data area
183 * @td: BBT descriptor used for computation 149 * @td: BBT descriptor used for computation
184 * 150 *
185 * The length will be 0 if the markeris located in OOB area. 151 * The length will be 0 if the marker is located in OOB area.
186 */ 152 */
187static u32 add_marker_len(struct nand_bbt_descr *td) 153static u32 add_marker_len(struct nand_bbt_descr *td)
188{ 154{
@@ -199,34 +165,33 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
199 165
200/** 166/**
201 * read_bbt - [GENERIC] Read the bad block table starting from page 167 * read_bbt - [GENERIC] Read the bad block table starting from page
202 * @mtd: MTD device structure 168 * @mtd: MTD device structure
203 * @buf: temporary buffer 169 * @buf: temporary buffer
204 * @page: the starting page 170 * @page: the starting page
205 * @num: the number of bbt descriptors to read 171 * @num: the number of bbt descriptors to read
206 * @td: the bbt describtion table 172 * @td: the bbt describtion table
207 * @offs: offset in the memory table 173 * @offs: offset in the memory table
208 * 174 *
209 * Read the bad block table starting from page. 175 * Read the bad block table starting from page.
210 *
211 */ 176 */
212static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num, 177static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
213 struct nand_bbt_descr *td, int offs) 178 struct nand_bbt_descr *td, int offs)
214{ 179{
215 int res, i, j, act = 0; 180 int res, ret = 0, i, j, act = 0;
216 struct nand_chip *this = mtd->priv; 181 struct nand_chip *this = mtd->priv;
217 size_t retlen, len, totlen; 182 size_t retlen, len, totlen;
218 loff_t from; 183 loff_t from;
219 int bits = td->options & NAND_BBT_NRBITS_MSK; 184 int bits = td->options & NAND_BBT_NRBITS_MSK;
220 uint8_t msk = (uint8_t) ((1 << bits) - 1); 185 uint8_t msk = (uint8_t)((1 << bits) - 1);
221 u32 marker_len; 186 u32 marker_len;
222 int reserved_block_code = td->reserved_block_code; 187 int reserved_block_code = td->reserved_block_code;
223 188
224 totlen = (num * bits) >> 3; 189 totlen = (num * bits) >> 3;
225 marker_len = add_marker_len(td); 190 marker_len = add_marker_len(td);
226 from = ((loff_t) page) << this->page_shift; 191 from = ((loff_t)page) << this->page_shift;
227 192
228 while (totlen) { 193 while (totlen) {
229 len = min(totlen, (size_t) (1 << this->bbt_erase_shift)); 194 len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
230 if (marker_len) { 195 if (marker_len) {
231 /* 196 /*
232 * In case the BBT marker is not in the OOB area it 197 * In case the BBT marker is not in the OOB area it
@@ -238,11 +203,18 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
238 } 203 }
239 res = mtd->read(mtd, from, len, &retlen, buf); 204 res = mtd->read(mtd, from, len, &retlen, buf);
240 if (res < 0) { 205 if (res < 0) {
241 if (retlen != len) { 206 if (mtd_is_eccerr(res)) {
242 printk(KERN_INFO "nand_bbt: Error reading bad block table\n"); 207 pr_info("nand_bbt: ECC error in BBT at "
208 "0x%012llx\n", from & ~mtd->writesize);
209 return res;
210 } else if (mtd_is_bitflip(res)) {
211 pr_info("nand_bbt: corrected error in BBT at "
212 "0x%012llx\n", from & ~mtd->writesize);
213 ret = res;
214 } else {
215 pr_info("nand_bbt: error reading BBT\n");
243 return res; 216 return res;
244 } 217 }
245 printk(KERN_WARNING "nand_bbt: ECC error while reading bad block table\n");
246 } 218 }
247 219
248 /* Analyse data */ 220 /* Analyse data */
@@ -253,17 +225,19 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
253 if (tmp == msk) 225 if (tmp == msk)
254 continue; 226 continue;
255 if (reserved_block_code && (tmp == reserved_block_code)) { 227 if (reserved_block_code && (tmp == reserved_block_code)) {
256 printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n", 228 pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
257 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 229 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
258 this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06); 230 this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
259 mtd->ecc_stats.bbtblocks++; 231 mtd->ecc_stats.bbtblocks++;
260 continue; 232 continue;
261 } 233 }
262 /* Leave it for now, if its matured we can move this 234 /*
263 * message to MTD_DEBUG_LEVEL0 */ 235 * Leave it for now, if it's matured we can
264 printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n", 236 * move this message to pr_debug.
265 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 237 */
266 /* Factory marked bad or worn out ? */ 238 pr_info("nand_read_bbt: bad block at 0x%012llx\n",
239 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
240 /* Factory marked bad or worn out? */
267 if (tmp == 0) 241 if (tmp == 0)
268 this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06); 242 this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
269 else 243 else
@@ -274,20 +248,20 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
274 totlen -= len; 248 totlen -= len;
275 from += len; 249 from += len;
276 } 250 }
277 return 0; 251 return ret;
278} 252}
279 253
280/** 254/**
281 * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page 255 * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
282 * @mtd: MTD device structure 256 * @mtd: MTD device structure
283 * @buf: temporary buffer 257 * @buf: temporary buffer
284 * @td: descriptor for the bad block table 258 * @td: descriptor for the bad block table
285 * @chip: read the table for a specific chip, -1 read all chips. 259 * @chip: read the table for a specific chip, -1 read all chips; applies only if
286 * Applies only if NAND_BBT_PERCHIP option is set 260 * NAND_BBT_PERCHIP option is set
287 * 261 *
288 * Read the bad block table for all chips starting at a given page 262 * Read the bad block table for all chips starting at a given page. We assume
289 * We assume that the bbt bits are in consecutive order. 263 * that the bbt bits are in consecutive order.
290*/ 264 */
291static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip) 265static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
292{ 266{
293 struct nand_chip *this = mtd->priv; 267 struct nand_chip *this = mtd->priv;
@@ -313,9 +287,7 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
313 return 0; 287 return 0;
314} 288}
315 289
316/* 290/* BBT marker is in the first page, no OOB */
317 * BBT marker is in the first page, no OOB.
318 */
319static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs, 291static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
320 struct nand_bbt_descr *td) 292 struct nand_bbt_descr *td)
321{ 293{
@@ -329,35 +301,26 @@ static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
329 return mtd->read(mtd, offs, len, &retlen, buf); 301 return mtd->read(mtd, offs, len, &retlen, buf);
330} 302}
331 303
332/* 304/* Scan read raw data from flash */
333 * Scan read raw data from flash
334 */
335static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs, 305static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
336 size_t len) 306 size_t len)
337{ 307{
338 struct mtd_oob_ops ops; 308 struct mtd_oob_ops ops;
339 int res; 309 int res;
340 310
341 ops.mode = MTD_OOB_RAW; 311 ops.mode = MTD_OPS_RAW;
342 ops.ooboffs = 0; 312 ops.ooboffs = 0;
343 ops.ooblen = mtd->oobsize; 313 ops.ooblen = mtd->oobsize;
344 314
345
346 while (len > 0) { 315 while (len > 0) {
347 if (len <= mtd->writesize) { 316 ops.datbuf = buf;
348 ops.oobbuf = buf + len; 317 ops.len = min(len, (size_t)mtd->writesize);
349 ops.datbuf = buf; 318 ops.oobbuf = buf + ops.len;
350 ops.len = len;
351 return mtd->read_oob(mtd, offs, &ops);
352 } else {
353 ops.oobbuf = buf + mtd->writesize;
354 ops.datbuf = buf;
355 ops.len = mtd->writesize;
356 res = mtd->read_oob(mtd, offs, &ops);
357 319
358 if (res) 320 res = mtd->read_oob(mtd, offs, &ops);
359 return res; 321
360 } 322 if (res)
323 return res;
361 324
362 buf += mtd->oobsize + mtd->writesize; 325 buf += mtd->oobsize + mtd->writesize;
363 len -= mtd->writesize; 326 len -= mtd->writesize;
@@ -374,15 +337,13 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
374 return scan_read_raw_oob(mtd, buf, offs, len); 337 return scan_read_raw_oob(mtd, buf, offs, len);
375} 338}
376 339
377/* 340/* Scan write data with oob to flash */
378 * Scan write data with oob to flash
379 */
380static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len, 341static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
381 uint8_t *buf, uint8_t *oob) 342 uint8_t *buf, uint8_t *oob)
382{ 343{
383 struct mtd_oob_ops ops; 344 struct mtd_oob_ops ops;
384 345
385 ops.mode = MTD_OOB_PLACE; 346 ops.mode = MTD_OPS_PLACE_OOB;
386 ops.ooboffs = 0; 347 ops.ooboffs = 0;
387 ops.ooblen = mtd->oobsize; 348 ops.ooblen = mtd->oobsize;
388 ops.datbuf = buf; 349 ops.datbuf = buf;
@@ -403,15 +364,14 @@ static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
403 364
404/** 365/**
405 * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page 366 * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
406 * @mtd: MTD device structure 367 * @mtd: MTD device structure
407 * @buf: temporary buffer 368 * @buf: temporary buffer
408 * @td: descriptor for the bad block table 369 * @td: descriptor for the bad block table
409 * @md: descriptor for the bad block table mirror 370 * @md: descriptor for the bad block table mirror
410 * 371 *
411 * Read the bad block table(s) for all chips starting at a given page 372 * Read the bad block table(s) for all chips starting at a given page. We
412 * We assume that the bbt bits are in consecutive order. 373 * assume that the bbt bits are in consecutive order.
413 * 374 */
414*/
415static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, 375static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
416 struct nand_bbt_descr *td, struct nand_bbt_descr *md) 376 struct nand_bbt_descr *td, struct nand_bbt_descr *md)
417{ 377{
@@ -422,8 +382,8 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
422 scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift, 382 scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
423 mtd->writesize, td); 383 mtd->writesize, td);
424 td->version[0] = buf[bbt_get_ver_offs(mtd, td)]; 384 td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
425 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", 385 pr_info("Bad block table at page %d, version 0x%02X\n",
426 td->pages[0], td->version[0]); 386 td->pages[0], td->version[0]);
427 } 387 }
428 388
429 /* Read the mirror version, if available */ 389 /* Read the mirror version, if available */
@@ -431,15 +391,13 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
431 scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift, 391 scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
432 mtd->writesize, td); 392 mtd->writesize, td);
433 md->version[0] = buf[bbt_get_ver_offs(mtd, md)]; 393 md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
434 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", 394 pr_info("Bad block table at page %d, version 0x%02X\n",
435 md->pages[0], md->version[0]); 395 md->pages[0], md->version[0]);
436 } 396 }
437 return 1; 397 return 1;
438} 398}
439 399
440/* 400/* Scan a given block full */
441 * Scan a given block full
442 */
443static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd, 401static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
444 loff_t offs, uint8_t *buf, size_t readlen, 402 loff_t offs, uint8_t *buf, size_t readlen,
445 int scanlen, int len) 403 int scanlen, int len)
@@ -447,7 +405,8 @@ static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
447 int ret, j; 405 int ret, j;
448 406
449 ret = scan_read_raw_oob(mtd, buf, offs, readlen); 407 ret = scan_read_raw_oob(mtd, buf, offs, readlen);
450 if (ret) 408 /* Ignore ECC errors when checking for BBM */
409 if (ret && !mtd_is_bitflip_or_eccerr(ret))
451 return ret; 410 return ret;
452 411
453 for (j = 0; j < len; j++, buf += scanlen) { 412 for (j = 0; j < len; j++, buf += scanlen) {
@@ -457,9 +416,7 @@ static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
457 return 0; 416 return 0;
458} 417}
459 418
460/* 419/* Scan a given block partially */
461 * Scan a given block partially
462 */
463static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd, 420static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
464 loff_t offs, uint8_t *buf, int len) 421 loff_t offs, uint8_t *buf, int len)
465{ 422{
@@ -470,16 +427,16 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
470 ops.oobbuf = buf; 427 ops.oobbuf = buf;
471 ops.ooboffs = 0; 428 ops.ooboffs = 0;
472 ops.datbuf = NULL; 429 ops.datbuf = NULL;
473 ops.mode = MTD_OOB_PLACE; 430 ops.mode = MTD_OPS_PLACE_OOB;
474 431
475 for (j = 0; j < len; j++) { 432 for (j = 0; j < len; j++) {
476 /* 433 /*
477 * Read the full oob until read_oob is fixed to 434 * Read the full oob until read_oob is fixed to handle single
478 * handle single byte reads for 16 bit 435 * byte reads for 16 bit buswidth.
479 * buswidth
480 */ 436 */
481 ret = mtd->read_oob(mtd, offs, &ops); 437 ret = mtd->read_oob(mtd, offs, &ops);
482 if (ret) 438 /* Ignore ECC errors when checking for BBM */
439 if (ret && !mtd_is_bitflip_or_eccerr(ret))
483 return ret; 440 return ret;
484 441
485 if (check_short_pattern(buf, bd)) 442 if (check_short_pattern(buf, bd))
@@ -492,14 +449,14 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
492 449
493/** 450/**
494 * create_bbt - [GENERIC] Create a bad block table by scanning the device 451 * create_bbt - [GENERIC] Create a bad block table by scanning the device
495 * @mtd: MTD device structure 452 * @mtd: MTD device structure
496 * @buf: temporary buffer 453 * @buf: temporary buffer
497 * @bd: descriptor for the good/bad block search pattern 454 * @bd: descriptor for the good/bad block search pattern
498 * @chip: create the table for a specific chip, -1 read all chips. 455 * @chip: create the table for a specific chip, -1 read all chips; applies only
499 * Applies only if NAND_BBT_PERCHIP option is set 456 * if NAND_BBT_PERCHIP option is set
500 * 457 *
501 * Create a bad block table by scanning the device 458 * Create a bad block table by scanning the device for the given good/bad block
502 * for the given good/bad block identify pattern 459 * identify pattern.
503 */ 460 */
504static int create_bbt(struct mtd_info *mtd, uint8_t *buf, 461static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
505 struct nand_bbt_descr *bd, int chip) 462 struct nand_bbt_descr *bd, int chip)
@@ -510,7 +467,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
510 loff_t from; 467 loff_t from;
511 size_t readlen; 468 size_t readlen;
512 469
513 printk(KERN_INFO "Scanning device for bad blocks\n"); 470 pr_info("Scanning device for bad blocks\n");
514 471
515 if (bd->options & NAND_BBT_SCANALLPAGES) 472 if (bd->options & NAND_BBT_SCANALLPAGES)
516 len = 1 << (this->bbt_erase_shift - this->page_shift); 473 len = 1 << (this->bbt_erase_shift - this->page_shift);
@@ -530,14 +487,16 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
530 } 487 }
531 488
532 if (chip == -1) { 489 if (chip == -1) {
533 /* Note that numblocks is 2 * (real numblocks) here, see i+=2 490 /*
534 * below as it makes shifting and masking less painful */ 491 * Note that numblocks is 2 * (real numblocks) here, see i+=2
492 * below as it makes shifting and masking less painful
493 */
535 numblocks = mtd->size >> (this->bbt_erase_shift - 1); 494 numblocks = mtd->size >> (this->bbt_erase_shift - 1);
536 startblock = 0; 495 startblock = 0;
537 from = 0; 496 from = 0;
538 } else { 497 } else {
539 if (chip >= this->numchips) { 498 if (chip >= this->numchips) {
540 printk(KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n", 499 pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
541 chip + 1, this->numchips); 500 chip + 1, this->numchips);
542 return -EINVAL; 501 return -EINVAL;
543 } 502 }
@@ -547,7 +506,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
547 from = (loff_t)startblock << (this->bbt_erase_shift - 1); 506 from = (loff_t)startblock << (this->bbt_erase_shift - 1);
548 } 507 }
549 508
550 if (this->options & NAND_BBT_SCANLASTPAGE) 509 if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
551 from += mtd->erasesize - (mtd->writesize * len); 510 from += mtd->erasesize - (mtd->writesize * len);
552 511
553 for (i = startblock; i < numblocks;) { 512 for (i = startblock; i < numblocks;) {
@@ -566,8 +525,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
566 525
567 if (ret) { 526 if (ret) {
568 this->bbt[i >> 3] |= 0x03 << (i & 0x6); 527 this->bbt[i >> 3] |= 0x03 << (i & 0x6);
569 printk(KERN_WARNING "Bad eraseblock %d at 0x%012llx\n", 528 pr_warn("Bad eraseblock %d at 0x%012llx\n",
570 i >> 1, (unsigned long long)from); 529 i >> 1, (unsigned long long)from);
571 mtd->ecc_stats.badblocks++; 530 mtd->ecc_stats.badblocks++;
572 } 531 }
573 532
@@ -579,20 +538,18 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
579 538
580/** 539/**
581 * search_bbt - [GENERIC] scan the device for a specific bad block table 540 * search_bbt - [GENERIC] scan the device for a specific bad block table
582 * @mtd: MTD device structure 541 * @mtd: MTD device structure
583 * @buf: temporary buffer 542 * @buf: temporary buffer
584 * @td: descriptor for the bad block table 543 * @td: descriptor for the bad block table
585 * 544 *
586 * Read the bad block table by searching for a given ident pattern. 545 * Read the bad block table by searching for a given ident pattern. Search is
587 * Search is preformed either from the beginning up or from the end of 546 * preformed either from the beginning up or from the end of the device
588 * the device downwards. The search starts always at the start of a 547 * downwards. The search starts always at the start of a block. If the option
589 * block. 548 * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
590 * If the option NAND_BBT_PERCHIP is given, each chip is searched 549 * the bad block information of this chip. This is necessary to provide support
591 * for a bbt, which contains the bad block information of this chip. 550 * for certain DOC devices.
592 * This is necessary to provide support for certain DOC devices.
593 * 551 *
594 * The bbt ident pattern resides in the oob area of the first page 552 * The bbt ident pattern resides in the oob area of the first page in a block.
595 * in a block.
596 */ 553 */
597static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td) 554static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
598{ 555{
@@ -603,7 +560,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
603 int bbtblocks; 560 int bbtblocks;
604 int blocktopage = this->bbt_erase_shift - this->page_shift; 561 int blocktopage = this->bbt_erase_shift - this->page_shift;
605 562
606 /* Search direction top -> down ? */ 563 /* Search direction top -> down? */
607 if (td->options & NAND_BBT_LASTBLOCK) { 564 if (td->options & NAND_BBT_LASTBLOCK) {
608 startblock = (mtd->size >> this->bbt_erase_shift) - 1; 565 startblock = (mtd->size >> this->bbt_erase_shift) - 1;
609 dir = -1; 566 dir = -1;
@@ -612,7 +569,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
612 dir = 1; 569 dir = 1;
613 } 570 }
614 571
615 /* Do we have a bbt per chip ? */ 572 /* Do we have a bbt per chip? */
616 if (td->options & NAND_BBT_PERCHIP) { 573 if (td->options & NAND_BBT_PERCHIP) {
617 chips = this->numchips; 574 chips = this->numchips;
618 bbtblocks = this->chipsize >> this->bbt_erase_shift; 575 bbtblocks = this->chipsize >> this->bbt_erase_shift;
@@ -651,23 +608,23 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
651 /* Check, if we found a bbt for each requested chip */ 608 /* Check, if we found a bbt for each requested chip */
652 for (i = 0; i < chips; i++) { 609 for (i = 0; i < chips; i++) {
653 if (td->pages[i] == -1) 610 if (td->pages[i] == -1)
654 printk(KERN_WARNING "Bad block table not found for chip %d\n", i); 611 pr_warn("Bad block table not found for chip %d\n", i);
655 else 612 else
656 printk(KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i], 613 pr_info("Bad block table found at page %d, version "
657 td->version[i]); 614 "0x%02X\n", td->pages[i], td->version[i]);
658 } 615 }
659 return 0; 616 return 0;
660} 617}
661 618
662/** 619/**
663 * search_read_bbts - [GENERIC] scan the device for bad block table(s) 620 * search_read_bbts - [GENERIC] scan the device for bad block table(s)
664 * @mtd: MTD device structure 621 * @mtd: MTD device structure
665 * @buf: temporary buffer 622 * @buf: temporary buffer
666 * @td: descriptor for the bad block table 623 * @td: descriptor for the bad block table
667 * @md: descriptor for the bad block table mirror 624 * @md: descriptor for the bad block table mirror
668 * 625 *
669 * Search and read the bad block table(s) 626 * Search and read the bad block table(s).
670*/ 627 */
671static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md) 628static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md)
672{ 629{
673 /* Search the primary table */ 630 /* Search the primary table */
@@ -683,16 +640,14 @@ static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt
683 640
684/** 641/**
685 * write_bbt - [GENERIC] (Re)write the bad block table 642 * write_bbt - [GENERIC] (Re)write the bad block table
643 * @mtd: MTD device structure
644 * @buf: temporary buffer
645 * @td: descriptor for the bad block table
646 * @md: descriptor for the bad block table mirror
647 * @chipsel: selector for a specific chip, -1 for all
686 * 648 *
687 * @mtd: MTD device structure 649 * (Re)write the bad block table.
688 * @buf: temporary buffer 650 */
689 * @td: descriptor for the bad block table
690 * @md: descriptor for the bad block table mirror
691 * @chipsel: selector for a specific chip, -1 for all
692 *
693 * (Re)write the bad block table
694 *
695*/
696static int write_bbt(struct mtd_info *mtd, uint8_t *buf, 651static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
697 struct nand_bbt_descr *td, struct nand_bbt_descr *md, 652 struct nand_bbt_descr *td, struct nand_bbt_descr *md,
698 int chipsel) 653 int chipsel)
@@ -711,14 +666,14 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
711 ops.ooblen = mtd->oobsize; 666 ops.ooblen = mtd->oobsize;
712 ops.ooboffs = 0; 667 ops.ooboffs = 0;
713 ops.datbuf = NULL; 668 ops.datbuf = NULL;
714 ops.mode = MTD_OOB_PLACE; 669 ops.mode = MTD_OPS_PLACE_OOB;
715 670
716 if (!rcode) 671 if (!rcode)
717 rcode = 0xff; 672 rcode = 0xff;
718 /* Write bad block table per chip rather than per device ? */ 673 /* Write bad block table per chip rather than per device? */
719 if (td->options & NAND_BBT_PERCHIP) { 674 if (td->options & NAND_BBT_PERCHIP) {
720 numblocks = (int)(this->chipsize >> this->bbt_erase_shift); 675 numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
721 /* Full device write or specific chip ? */ 676 /* Full device write or specific chip? */
722 if (chipsel == -1) { 677 if (chipsel == -1) {
723 nrchips = this->numchips; 678 nrchips = this->numchips;
724 } else { 679 } else {
@@ -732,8 +687,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
732 687
733 /* Loop through the chips */ 688 /* Loop through the chips */
734 for (; chip < nrchips; chip++) { 689 for (; chip < nrchips; chip++) {
735 690 /*
736 /* There was already a version of the table, reuse the page 691 * There was already a version of the table, reuse the page
737 * This applies for absolute placement too, as we have the 692 * This applies for absolute placement too, as we have the
738 * page nr. in td->pages. 693 * page nr. in td->pages.
739 */ 694 */
@@ -742,8 +697,10 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
742 goto write; 697 goto write;
743 } 698 }
744 699
745 /* Automatic placement of the bad block table */ 700 /*
746 /* Search direction top -> down ? */ 701 * Automatic placement of the bad block table. Search direction
702 * top -> down?
703 */
747 if (td->options & NAND_BBT_LASTBLOCK) { 704 if (td->options & NAND_BBT_LASTBLOCK) {
748 startblock = numblocks * (chip + 1) - 1; 705 startblock = numblocks * (chip + 1) - 1;
749 dir = -1; 706 dir = -1;
@@ -767,7 +724,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
767 if (!md || md->pages[chip] != page) 724 if (!md || md->pages[chip] != page)
768 goto write; 725 goto write;
769 } 726 }
770 printk(KERN_ERR "No space left to write bad block table\n"); 727 pr_err("No space left to write bad block table\n");
771 return -ENOSPC; 728 return -ENOSPC;
772 write: 729 write:
773 730
@@ -792,24 +749,22 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
792 749
793 bbtoffs = chip * (numblocks >> 2); 750 bbtoffs = chip * (numblocks >> 2);
794 751
795 to = ((loff_t) page) << this->page_shift; 752 to = ((loff_t)page) << this->page_shift;
796 753
797 /* Must we save the block contents ? */ 754 /* Must we save the block contents? */
798 if (td->options & NAND_BBT_SAVECONTENT) { 755 if (td->options & NAND_BBT_SAVECONTENT) {
799 /* Make it block aligned */ 756 /* Make it block aligned */
800 to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1)); 757 to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
801 len = 1 << this->bbt_erase_shift; 758 len = 1 << this->bbt_erase_shift;
802 res = mtd->read(mtd, to, len, &retlen, buf); 759 res = mtd->read(mtd, to, len, &retlen, buf);
803 if (res < 0) { 760 if (res < 0) {
804 if (retlen != len) { 761 if (retlen != len) {
805 printk(KERN_INFO "nand_bbt: Error " 762 pr_info("nand_bbt: error reading block "
806 "reading block for writing " 763 "for writing the bad block table\n");
807 "the bad block table\n");
808 return res; 764 return res;
809 } 765 }
810 printk(KERN_WARNING "nand_bbt: ECC error " 766 pr_warn("nand_bbt: ECC error while reading "
811 "while reading block for writing " 767 "block for writing bad block table\n");
812 "bad block table\n");
813 } 768 }
814 /* Read oob data */ 769 /* Read oob data */
815 ops.ooblen = (len >> this->page_shift) * mtd->oobsize; 770 ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
@@ -822,19 +777,19 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
822 pageoffs = page - (int)(to >> this->page_shift); 777 pageoffs = page - (int)(to >> this->page_shift);
823 offs = pageoffs << this->page_shift; 778 offs = pageoffs << this->page_shift;
824 /* Preset the bbt area with 0xff */ 779 /* Preset the bbt area with 0xff */
825 memset(&buf[offs], 0xff, (size_t) (numblocks >> sft)); 780 memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
826 ooboffs = len + (pageoffs * mtd->oobsize); 781 ooboffs = len + (pageoffs * mtd->oobsize);
827 782
828 } else if (td->options & NAND_BBT_NO_OOB) { 783 } else if (td->options & NAND_BBT_NO_OOB) {
829 ooboffs = 0; 784 ooboffs = 0;
830 offs = td->len; 785 offs = td->len;
831 /* the version byte */ 786 /* The version byte */
832 if (td->options & NAND_BBT_VERSION) 787 if (td->options & NAND_BBT_VERSION)
833 offs++; 788 offs++;
834 /* Calc length */ 789 /* Calc length */
835 len = (size_t) (numblocks >> sft); 790 len = (size_t)(numblocks >> sft);
836 len += offs; 791 len += offs;
837 /* Make it page aligned ! */ 792 /* Make it page aligned! */
838 len = ALIGN(len, mtd->writesize); 793 len = ALIGN(len, mtd->writesize);
839 /* Preset the buffer with 0xff */ 794 /* Preset the buffer with 0xff */
840 memset(buf, 0xff, len); 795 memset(buf, 0xff, len);
@@ -842,8 +797,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
842 memcpy(buf, td->pattern, td->len); 797 memcpy(buf, td->pattern, td->len);
843 } else { 798 } else {
844 /* Calc length */ 799 /* Calc length */
845 len = (size_t) (numblocks >> sft); 800 len = (size_t)(numblocks >> sft);
846 /* Make it page aligned ! */ 801 /* Make it page aligned! */
847 len = ALIGN(len, mtd->writesize); 802 len = ALIGN(len, mtd->writesize);
848 /* Preset the buffer with 0xff */ 803 /* Preset the buffer with 0xff */
849 memset(buf, 0xff, len + 804 memset(buf, 0xff, len +
@@ -857,13 +812,13 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
857 if (td->options & NAND_BBT_VERSION) 812 if (td->options & NAND_BBT_VERSION)
858 buf[ooboffs + td->veroffs] = td->version[chip]; 813 buf[ooboffs + td->veroffs] = td->version[chip];
859 814
860 /* walk through the memory table */ 815 /* Walk through the memory table */
861 for (i = 0; i < numblocks;) { 816 for (i = 0; i < numblocks;) {
862 uint8_t dat; 817 uint8_t dat;
863 dat = this->bbt[bbtoffs + (i >> 2)]; 818 dat = this->bbt[bbtoffs + (i >> 2)];
864 for (j = 0; j < 4; j++, i++) { 819 for (j = 0; j < 4; j++, i++) {
865 int sftcnt = (i << (3 - sft)) & sftmsk; 820 int sftcnt = (i << (3 - sft)) & sftmsk;
866 /* Do not store the reserved bbt blocks ! */ 821 /* Do not store the reserved bbt blocks! */
867 buf[offs + (i >> sft)] &= 822 buf[offs + (i >> sft)] &=
868 ~(msk[dat & 0x03] << sftcnt); 823 ~(msk[dat & 0x03] << sftcnt);
869 dat >>= 2; 824 dat >>= 2;
@@ -884,8 +839,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
884 if (res < 0) 839 if (res < 0)
885 goto outerr; 840 goto outerr;
886 841
887 printk(KERN_DEBUG "Bad block table written to 0x%012llx, version " 842 pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
888 "0x%02X\n", (unsigned long long)to, td->version[chip]); 843 (unsigned long long)to, td->version[chip]);
889 844
890 /* Mark it as used */ 845 /* Mark it as used */
891 td->pages[chip] = page; 846 td->pages[chip] = page;
@@ -893,19 +848,18 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
893 return 0; 848 return 0;
894 849
895 outerr: 850 outerr:
896 printk(KERN_WARNING 851 pr_warn("nand_bbt: error while writing bad block table %d\n", res);
897 "nand_bbt: Error while writing bad block table %d\n", res);
898 return res; 852 return res;
899} 853}
900 854
901/** 855/**
902 * nand_memory_bbt - [GENERIC] create a memory based bad block table 856 * nand_memory_bbt - [GENERIC] create a memory based bad block table
903 * @mtd: MTD device structure 857 * @mtd: MTD device structure
904 * @bd: descriptor for the good/bad block search pattern 858 * @bd: descriptor for the good/bad block search pattern
905 * 859 *
906 * The function creates a memory based bbt by scanning the device 860 * The function creates a memory based bbt by scanning the device for
907 * for manufacturer / software marked good / bad blocks 861 * manufacturer / software marked good / bad blocks.
908*/ 862 */
909static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) 863static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
910{ 864{
911 struct nand_chip *this = mtd->priv; 865 struct nand_chip *this = mtd->priv;
@@ -916,25 +870,24 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
916 870
917/** 871/**
918 * check_create - [GENERIC] create and write bbt(s) if necessary 872 * check_create - [GENERIC] create and write bbt(s) if necessary
919 * @mtd: MTD device structure 873 * @mtd: MTD device structure
920 * @buf: temporary buffer 874 * @buf: temporary buffer
921 * @bd: descriptor for the good/bad block search pattern 875 * @bd: descriptor for the good/bad block search pattern
922 * 876 *
923 * The function checks the results of the previous call to read_bbt 877 * The function checks the results of the previous call to read_bbt and creates
924 * and creates / updates the bbt(s) if necessary 878 * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
925 * Creation is necessary if no bbt was found for the chip/device 879 * for the chip/device. Update is necessary if one of the tables is missing or
926 * Update is necessary if one of the tables is missing or the 880 * the version nr. of one table is less than the other.
927 * version nr. of one table is less than the other 881 */
928*/
929static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd) 882static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
930{ 883{
931 int i, chips, writeops, chipsel, res; 884 int i, chips, writeops, create, chipsel, res, res2;
932 struct nand_chip *this = mtd->priv; 885 struct nand_chip *this = mtd->priv;
933 struct nand_bbt_descr *td = this->bbt_td; 886 struct nand_bbt_descr *td = this->bbt_td;
934 struct nand_bbt_descr *md = this->bbt_md; 887 struct nand_bbt_descr *md = this->bbt_md;
935 struct nand_bbt_descr *rd, *rd2; 888 struct nand_bbt_descr *rd, *rd2;
936 889
937 /* Do we have a bbt per chip ? */ 890 /* Do we have a bbt per chip? */
938 if (td->options & NAND_BBT_PERCHIP) 891 if (td->options & NAND_BBT_PERCHIP)
939 chips = this->numchips; 892 chips = this->numchips;
940 else 893 else
@@ -942,86 +895,98 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
942 895
943 for (i = 0; i < chips; i++) { 896 for (i = 0; i < chips; i++) {
944 writeops = 0; 897 writeops = 0;
898 create = 0;
945 rd = NULL; 899 rd = NULL;
946 rd2 = NULL; 900 rd2 = NULL;
947 /* Per chip or per device ? */ 901 res = res2 = 0;
902 /* Per chip or per device? */
948 chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1; 903 chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
949 /* Mirrored table available ? */ 904 /* Mirrored table available? */
950 if (md) { 905 if (md) {
951 if (td->pages[i] == -1 && md->pages[i] == -1) { 906 if (td->pages[i] == -1 && md->pages[i] == -1) {
907 create = 1;
952 writeops = 0x03; 908 writeops = 0x03;
953 goto create; 909 } else if (td->pages[i] == -1) {
954 }
955
956 if (td->pages[i] == -1) {
957 rd = md; 910 rd = md;
958 td->version[i] = md->version[i]; 911 writeops = 0x01;
959 writeops = 1; 912 } else if (md->pages[i] == -1) {
960 goto writecheck;
961 }
962
963 if (md->pages[i] == -1) {
964 rd = td; 913 rd = td;
965 md->version[i] = td->version[i]; 914 writeops = 0x02;
966 writeops = 2; 915 } else if (td->version[i] == md->version[i]) {
967 goto writecheck;
968 }
969
970 if (td->version[i] == md->version[i]) {
971 rd = td; 916 rd = td;
972 if (!(td->options & NAND_BBT_VERSION)) 917 if (!(td->options & NAND_BBT_VERSION))
973 rd2 = md; 918 rd2 = md;
974 goto writecheck; 919 } else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
975 }
976
977 if (((int8_t) (td->version[i] - md->version[i])) > 0) {
978 rd = td; 920 rd = td;
979 md->version[i] = td->version[i]; 921 writeops = 0x02;
980 writeops = 2;
981 } else { 922 } else {
982 rd = md; 923 rd = md;
983 td->version[i] = md->version[i]; 924 writeops = 0x01;
984 writeops = 1;
985 } 925 }
986
987 goto writecheck;
988
989 } else { 926 } else {
990 if (td->pages[i] == -1) { 927 if (td->pages[i] == -1) {
928 create = 1;
991 writeops = 0x01; 929 writeops = 0x01;
992 goto create; 930 } else {
931 rd = td;
993 } 932 }
994 rd = td;
995 goto writecheck;
996 } 933 }
997 create:
998 /* Create the bad block table by scanning the device ? */
999 if (!(td->options & NAND_BBT_CREATE))
1000 continue;
1001 934
1002 /* Create the table in memory by scanning the chip(s) */ 935 if (create) {
1003 if (!(this->options & NAND_CREATE_EMPTY_BBT)) 936 /* Create the bad block table by scanning the device? */
1004 create_bbt(mtd, buf, bd, chipsel); 937 if (!(td->options & NAND_BBT_CREATE))
1005 938 continue;
1006 td->version[i] = 1; 939
1007 if (md) 940 /* Create the table in memory by scanning the chip(s) */
1008 md->version[i] = 1; 941 if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
1009 writecheck: 942 create_bbt(mtd, buf, bd, chipsel);
1010 /* read back first ? */ 943
1011 if (rd) 944 td->version[i] = 1;
1012 read_abs_bbt(mtd, buf, rd, chipsel); 945 if (md)
1013 /* If they weren't versioned, read both. */ 946 md->version[i] = 1;
1014 if (rd2) 947 }
1015 read_abs_bbt(mtd, buf, rd2, chipsel); 948
1016 949 /* Read back first? */
1017 /* Write the bad block table to the device ? */ 950 if (rd) {
951 res = read_abs_bbt(mtd, buf, rd, chipsel);
952 if (mtd_is_eccerr(res)) {
953 /* Mark table as invalid */
954 rd->pages[i] = -1;
955 rd->version[i] = 0;
956 i--;
957 continue;
958 }
959 }
960 /* If they weren't versioned, read both */
961 if (rd2) {
962 res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
963 if (mtd_is_eccerr(res2)) {
964 /* Mark table as invalid */
965 rd2->pages[i] = -1;
966 rd2->version[i] = 0;
967 i--;
968 continue;
969 }
970 }
971
972 /* Scrub the flash table(s)? */
973 if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
974 writeops = 0x03;
975
976 /* Update version numbers before writing */
977 if (md) {
978 td->version[i] = max(td->version[i], md->version[i]);
979 md->version[i] = td->version[i];
980 }
981
982 /* Write the bad block table to the device? */
1018 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { 983 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
1019 res = write_bbt(mtd, buf, td, md, chipsel); 984 res = write_bbt(mtd, buf, td, md, chipsel);
1020 if (res < 0) 985 if (res < 0)
1021 return res; 986 return res;
1022 } 987 }
1023 988
1024 /* Write the mirror bad block table to the device ? */ 989 /* Write the mirror bad block table to the device? */
1025 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { 990 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
1026 res = write_bbt(mtd, buf, md, td, chipsel); 991 res = write_bbt(mtd, buf, md, td, chipsel);
1027 if (res < 0) 992 if (res < 0)
@@ -1033,20 +998,19 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
1033 998
1034/** 999/**
1035 * mark_bbt_regions - [GENERIC] mark the bad block table regions 1000 * mark_bbt_regions - [GENERIC] mark the bad block table regions
1036 * @mtd: MTD device structure 1001 * @mtd: MTD device structure
1037 * @td: bad block table descriptor 1002 * @td: bad block table descriptor
1038 * 1003 *
1039 * The bad block table regions are marked as "bad" to prevent 1004 * The bad block table regions are marked as "bad" to prevent accidental
1040 * accidental erasures / writes. The regions are identified by 1005 * erasures / writes. The regions are identified by the mark 0x02.
1041 * the mark 0x02. 1006 */
1042*/
1043static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td) 1007static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
1044{ 1008{
1045 struct nand_chip *this = mtd->priv; 1009 struct nand_chip *this = mtd->priv;
1046 int i, j, chips, block, nrblocks, update; 1010 int i, j, chips, block, nrblocks, update;
1047 uint8_t oldval, newval; 1011 uint8_t oldval, newval;
1048 1012
1049 /* Do we have a bbt per chip ? */ 1013 /* Do we have a bbt per chip? */
1050 if (td->options & NAND_BBT_PERCHIP) { 1014 if (td->options & NAND_BBT_PERCHIP) {
1051 chips = this->numchips; 1015 chips = this->numchips;
1052 nrblocks = (int)(this->chipsize >> this->bbt_erase_shift); 1016 nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
@@ -1083,9 +1047,11 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
1083 update = 1; 1047 update = 1;
1084 block += 2; 1048 block += 2;
1085 } 1049 }
1086 /* If we want reserved blocks to be recorded to flash, and some 1050 /*
1087 new ones have been marked, then we need to update the stored 1051 * If we want reserved blocks to be recorded to flash, and some
1088 bbts. This should only happen once. */ 1052 * new ones have been marked, then we need to update the stored
1053 * bbts. This should only happen once.
1054 */
1089 if (update && td->reserved_block_code) 1055 if (update && td->reserved_block_code)
1090 nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1)); 1056 nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
1091 } 1057 }
@@ -1093,8 +1059,8 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
1093 1059
1094/** 1060/**
1095 * verify_bbt_descr - verify the bad block description 1061 * verify_bbt_descr - verify the bad block description
1096 * @mtd: MTD device structure 1062 * @mtd: MTD device structure
1097 * @bd: the table to verify 1063 * @bd: the table to verify
1098 * 1064 *
1099 * This functions performs a few sanity checks on the bad block description 1065 * This functions performs a few sanity checks on the bad block description
1100 * table. 1066 * table.
@@ -1112,16 +1078,16 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1112 pattern_len = bd->len; 1078 pattern_len = bd->len;
1113 bits = bd->options & NAND_BBT_NRBITS_MSK; 1079 bits = bd->options & NAND_BBT_NRBITS_MSK;
1114 1080
1115 BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) && 1081 BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
1116 !(this->options & NAND_USE_FLASH_BBT)); 1082 !(this->bbt_options & NAND_BBT_USE_FLASH));
1117 BUG_ON(!bits); 1083 BUG_ON(!bits);
1118 1084
1119 if (bd->options & NAND_BBT_VERSION) 1085 if (bd->options & NAND_BBT_VERSION)
1120 pattern_len++; 1086 pattern_len++;
1121 1087
1122 if (bd->options & NAND_BBT_NO_OOB) { 1088 if (bd->options & NAND_BBT_NO_OOB) {
1123 BUG_ON(!(this->options & NAND_USE_FLASH_BBT)); 1089 BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
1124 BUG_ON(!(this->options & NAND_USE_FLASH_BBT_NO_OOB)); 1090 BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
1125 BUG_ON(bd->offs); 1091 BUG_ON(bd->offs);
1126 if (bd->options & NAND_BBT_VERSION) 1092 if (bd->options & NAND_BBT_VERSION)
1127 BUG_ON(bd->veroffs != bd->len); 1093 BUG_ON(bd->veroffs != bd->len);
@@ -1141,18 +1107,16 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1141 1107
1142/** 1108/**
1143 * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s) 1109 * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
1144 * @mtd: MTD device structure 1110 * @mtd: MTD device structure
1145 * @bd: descriptor for the good/bad block search pattern 1111 * @bd: descriptor for the good/bad block search pattern
1146 *
1147 * The function checks, if a bad block table(s) is/are already
1148 * available. If not it scans the device for manufacturer
1149 * marked good / bad blocks and writes the bad block table(s) to
1150 * the selected place.
1151 * 1112 *
1152 * The bad block table memory is allocated here. It must be freed 1113 * The function checks, if a bad block table(s) is/are already available. If
1153 * by calling the nand_free_bbt function. 1114 * not it scans the device for manufacturer marked good / bad blocks and writes
1115 * the bad block table(s) to the selected place.
1154 * 1116 *
1155*/ 1117 * The bad block table memory is allocated here. It must be freed by calling
1118 * the nand_free_bbt function.
1119 */
1156int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) 1120int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1157{ 1121{
1158 struct nand_chip *this = mtd->priv; 1122 struct nand_chip *this = mtd->priv;
@@ -1162,19 +1126,21 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1162 struct nand_bbt_descr *md = this->bbt_md; 1126 struct nand_bbt_descr *md = this->bbt_md;
1163 1127
1164 len = mtd->size >> (this->bbt_erase_shift + 2); 1128 len = mtd->size >> (this->bbt_erase_shift + 2);
1165 /* Allocate memory (2bit per block) and clear the memory bad block table */ 1129 /*
1130 * Allocate memory (2bit per block) and clear the memory bad block
1131 * table.
1132 */
1166 this->bbt = kzalloc(len, GFP_KERNEL); 1133 this->bbt = kzalloc(len, GFP_KERNEL);
1167 if (!this->bbt) { 1134 if (!this->bbt)
1168 printk(KERN_ERR "nand_scan_bbt: Out of memory\n");
1169 return -ENOMEM; 1135 return -ENOMEM;
1170 }
1171 1136
1172 /* If no primary table decriptor is given, scan the device 1137 /*
1173 * to build a memory based bad block table 1138 * If no primary table decriptor is given, scan the device to build a
1139 * memory based bad block table.
1174 */ 1140 */
1175 if (!td) { 1141 if (!td) {
1176 if ((res = nand_memory_bbt(mtd, bd))) { 1142 if ((res = nand_memory_bbt(mtd, bd))) {
1177 printk(KERN_ERR "nand_bbt: Can't scan flash and build the RAM-based BBT\n"); 1143 pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
1178 kfree(this->bbt); 1144 kfree(this->bbt);
1179 this->bbt = NULL; 1145 this->bbt = NULL;
1180 } 1146 }
@@ -1188,13 +1154,12 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1188 len += (len >> this->page_shift) * mtd->oobsize; 1154 len += (len >> this->page_shift) * mtd->oobsize;
1189 buf = vmalloc(len); 1155 buf = vmalloc(len);
1190 if (!buf) { 1156 if (!buf) {
1191 printk(KERN_ERR "nand_bbt: Out of memory\n");
1192 kfree(this->bbt); 1157 kfree(this->bbt);
1193 this->bbt = NULL; 1158 this->bbt = NULL;
1194 return -ENOMEM; 1159 return -ENOMEM;
1195 } 1160 }
1196 1161
1197 /* Is the bbt at a given page ? */ 1162 /* Is the bbt at a given page? */
1198 if (td->options & NAND_BBT_ABSPAGE) { 1163 if (td->options & NAND_BBT_ABSPAGE) {
1199 res = read_abs_bbts(mtd, buf, td, md); 1164 res = read_abs_bbts(mtd, buf, td, md);
1200 } else { 1165 } else {
@@ -1216,15 +1181,15 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1216 1181
1217/** 1182/**
1218 * nand_update_bbt - [NAND Interface] update bad block table(s) 1183 * nand_update_bbt - [NAND Interface] update bad block table(s)
1219 * @mtd: MTD device structure 1184 * @mtd: MTD device structure
1220 * @offs: the offset of the newly marked block 1185 * @offs: the offset of the newly marked block
1221 * 1186 *
1222 * The function updates the bad block table(s) 1187 * The function updates the bad block table(s).
1223*/ 1188 */
1224int nand_update_bbt(struct mtd_info *mtd, loff_t offs) 1189int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1225{ 1190{
1226 struct nand_chip *this = mtd->priv; 1191 struct nand_chip *this = mtd->priv;
1227 int len, res = 0, writeops = 0; 1192 int len, res = 0;
1228 int chip, chipsel; 1193 int chip, chipsel;
1229 uint8_t *buf; 1194 uint8_t *buf;
1230 struct nand_bbt_descr *td = this->bbt_td; 1195 struct nand_bbt_descr *td = this->bbt_td;
@@ -1237,14 +1202,10 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1237 len = (1 << this->bbt_erase_shift); 1202 len = (1 << this->bbt_erase_shift);
1238 len += (len >> this->page_shift) * mtd->oobsize; 1203 len += (len >> this->page_shift) * mtd->oobsize;
1239 buf = kmalloc(len, GFP_KERNEL); 1204 buf = kmalloc(len, GFP_KERNEL);
1240 if (!buf) { 1205 if (!buf)
1241 printk(KERN_ERR "nand_update_bbt: Out of memory\n");
1242 return -ENOMEM; 1206 return -ENOMEM;
1243 }
1244
1245 writeops = md != NULL ? 0x03 : 0x01;
1246 1207
1247 /* Do we have a bbt per chip ? */ 1208 /* Do we have a bbt per chip? */
1248 if (td->options & NAND_BBT_PERCHIP) { 1209 if (td->options & NAND_BBT_PERCHIP) {
1249 chip = (int)(offs >> this->chip_shift); 1210 chip = (int)(offs >> this->chip_shift);
1250 chipsel = chip; 1211 chipsel = chip;
@@ -1257,14 +1218,14 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1257 if (md) 1218 if (md)
1258 md->version[chip]++; 1219 md->version[chip]++;
1259 1220
1260 /* Write the bad block table to the device ? */ 1221 /* Write the bad block table to the device? */
1261 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { 1222 if (td->options & NAND_BBT_WRITE) {
1262 res = write_bbt(mtd, buf, td, md, chipsel); 1223 res = write_bbt(mtd, buf, td, md, chipsel);
1263 if (res < 0) 1224 if (res < 0)
1264 goto out; 1225 goto out;
1265 } 1226 }
1266 /* Write the mirror bad block table to the device ? */ 1227 /* Write the mirror bad block table to the device? */
1267 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { 1228 if (md && (md->options & NAND_BBT_WRITE)) {
1268 res = write_bbt(mtd, buf, md, td, chipsel); 1229 res = write_bbt(mtd, buf, md, td, chipsel);
1269 } 1230 }
1270 1231
@@ -1273,8 +1234,10 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1273 return res; 1234 return res;
1274} 1235}
1275 1236
1276/* Define some generic bad / good block scan pattern which are used 1237/*
1277 * while scanning a device for factory marked good / bad blocks. */ 1238 * Define some generic bad / good block scan pattern which are used
1239 * while scanning a device for factory marked good / bad blocks.
1240 */
1278static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; 1241static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
1279 1242
1280static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 }; 1243static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
@@ -1286,8 +1249,7 @@ static struct nand_bbt_descr agand_flashbased = {
1286 .pattern = scan_agand_pattern 1249 .pattern = scan_agand_pattern
1287}; 1250};
1288 1251
1289/* Generic flash bbt decriptors 1252/* Generic flash bbt descriptors */
1290*/
1291static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; 1253static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
1292static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' }; 1254static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
1293 1255
@@ -1331,31 +1293,27 @@ static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
1331 .pattern = mirror_pattern 1293 .pattern = mirror_pattern
1332}; 1294};
1333 1295
1334#define BBT_SCAN_OPTIONS (NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE | \ 1296#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
1335 NAND_BBT_SCANBYTE1AND6)
1336/** 1297/**
1337 * nand_create_default_bbt_descr - [Internal] Creates a BBT descriptor structure 1298 * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
1338 * @this: NAND chip to create descriptor for 1299 * @this: NAND chip to create descriptor for
1339 * 1300 *
1340 * This function allocates and initializes a nand_bbt_descr for BBM detection 1301 * This function allocates and initializes a nand_bbt_descr for BBM detection
1341 * based on the properties of "this". The new descriptor is stored in 1302 * based on the properties of @this. The new descriptor is stored in
1342 * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when 1303 * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
1343 * passed to this function. 1304 * passed to this function.
1344 *
1345 */ 1305 */
1346static int nand_create_default_bbt_descr(struct nand_chip *this) 1306static int nand_create_badblock_pattern(struct nand_chip *this)
1347{ 1307{
1348 struct nand_bbt_descr *bd; 1308 struct nand_bbt_descr *bd;
1349 if (this->badblock_pattern) { 1309 if (this->badblock_pattern) {
1350 printk(KERN_WARNING "BBT descr already allocated; not replacing.\n"); 1310 pr_warn("Bad block pattern already allocated; not replacing\n");
1351 return -EINVAL; 1311 return -EINVAL;
1352 } 1312 }
1353 bd = kzalloc(sizeof(*bd), GFP_KERNEL); 1313 bd = kzalloc(sizeof(*bd), GFP_KERNEL);
1354 if (!bd) { 1314 if (!bd)
1355 printk(KERN_ERR "nand_create_default_bbt_descr: Out of memory\n");
1356 return -ENOMEM; 1315 return -ENOMEM;
1357 } 1316 bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
1358 bd->options = this->options & BBT_SCAN_OPTIONS;
1359 bd->offs = this->badblockpos; 1317 bd->offs = this->badblockpos;
1360 bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1; 1318 bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
1361 bd->pattern = scan_ff_pattern; 1319 bd->pattern = scan_ff_pattern;
@@ -1366,22 +1324,20 @@ static int nand_create_default_bbt_descr(struct nand_chip *this)
1366 1324
1367/** 1325/**
1368 * nand_default_bbt - [NAND Interface] Select a default bad block table for the device 1326 * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
1369 * @mtd: MTD device structure 1327 * @mtd: MTD device structure
1370 *
1371 * This function selects the default bad block table
1372 * support for the device and calls the nand_scan_bbt function
1373 * 1328 *
1374*/ 1329 * This function selects the default bad block table support for the device and
1330 * calls the nand_scan_bbt function.
1331 */
1375int nand_default_bbt(struct mtd_info *mtd) 1332int nand_default_bbt(struct mtd_info *mtd)
1376{ 1333{
1377 struct nand_chip *this = mtd->priv; 1334 struct nand_chip *this = mtd->priv;
1378 1335
1379 /* Default for AG-AND. We must use a flash based 1336 /*
1380 * bad block table as the devices have factory marked 1337 * Default for AG-AND. We must use a flash based bad block table as the
1381 * _good_ blocks. Erasing those blocks leads to loss 1338 * devices have factory marked _good_ blocks. Erasing those blocks
1382 * of the good / bad information, so we _must_ store 1339 * leads to loss of the good / bad information, so we _must_ store this
1383 * this information in a good / bad table during 1340 * information in a good / bad table during startup.
1384 * startup
1385 */ 1341 */
1386 if (this->options & NAND_IS_AND) { 1342 if (this->options & NAND_IS_AND) {
1387 /* Use the default pattern descriptors */ 1343 /* Use the default pattern descriptors */
@@ -1389,15 +1345,15 @@ int nand_default_bbt(struct mtd_info *mtd)
1389 this->bbt_td = &bbt_main_descr; 1345 this->bbt_td = &bbt_main_descr;
1390 this->bbt_md = &bbt_mirror_descr; 1346 this->bbt_md = &bbt_mirror_descr;
1391 } 1347 }
1392 this->options |= NAND_USE_FLASH_BBT; 1348 this->bbt_options |= NAND_BBT_USE_FLASH;
1393 return nand_scan_bbt(mtd, &agand_flashbased); 1349 return nand_scan_bbt(mtd, &agand_flashbased);
1394 } 1350 }
1395 1351
1396 /* Is a flash based bad block table requested ? */ 1352 /* Is a flash based bad block table requested? */
1397 if (this->options & NAND_USE_FLASH_BBT) { 1353 if (this->bbt_options & NAND_BBT_USE_FLASH) {
1398 /* Use the default pattern descriptors */ 1354 /* Use the default pattern descriptors */
1399 if (!this->bbt_td) { 1355 if (!this->bbt_td) {
1400 if (this->options & NAND_USE_FLASH_BBT_NO_OOB) { 1356 if (this->bbt_options & NAND_BBT_NO_OOB) {
1401 this->bbt_td = &bbt_main_no_bbt_descr; 1357 this->bbt_td = &bbt_main_no_bbt_descr;
1402 this->bbt_md = &bbt_mirror_no_bbt_descr; 1358 this->bbt_md = &bbt_mirror_no_bbt_descr;
1403 } else { 1359 } else {
@@ -1411,18 +1367,17 @@ int nand_default_bbt(struct mtd_info *mtd)
1411 } 1367 }
1412 1368
1413 if (!this->badblock_pattern) 1369 if (!this->badblock_pattern)
1414 nand_create_default_bbt_descr(this); 1370 nand_create_badblock_pattern(this);
1415 1371
1416 return nand_scan_bbt(mtd, this->badblock_pattern); 1372 return nand_scan_bbt(mtd, this->badblock_pattern);
1417} 1373}
1418 1374
1419/** 1375/**
1420 * nand_isbad_bbt - [NAND Interface] Check if a block is bad 1376 * nand_isbad_bbt - [NAND Interface] Check if a block is bad
1421 * @mtd: MTD device structure 1377 * @mtd: MTD device structure
1422 * @offs: offset in the device 1378 * @offs: offset in the device
1423 * @allowbbt: allow access to bad block table region 1379 * @allowbbt: allow access to bad block table region
1424 * 1380 */
1425*/
1426int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) 1381int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
1427{ 1382{
1428 struct nand_chip *this = mtd->priv; 1383 struct nand_chip *this = mtd->priv;
@@ -1433,8 +1388,9 @@ int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
1433 block = (int)(offs >> (this->bbt_erase_shift - 1)); 1388 block = (int)(offs >> (this->bbt_erase_shift - 1));
1434 res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03; 1389 res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
1435 1390
1436 DEBUG(MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n", 1391 pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: "
1437 (unsigned int)offs, block >> 1, res); 1392 "(block %d) 0x%02x\n",
1393 (unsigned int)offs, block >> 1, res);
1438 1394
1439 switch ((int)res) { 1395 switch ((int)res) {
1440 case 0x00: 1396 case 0x00:
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
index 0f931e757116..3803e0bba23b 100644
--- a/drivers/mtd/nand/nand_bch.c
+++ b/drivers/mtd/nand/nand_bch.c
@@ -93,8 +93,8 @@ int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
93 buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7)); 93 buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
94 /* else error in ecc, no action needed */ 94 /* else error in ecc, no action needed */
95 95
96 DEBUG(MTD_DEBUG_LEVEL0, "%s: corrected bitflip %u\n", 96 pr_debug("%s: corrected bitflip %u\n", __func__,
97 __func__, errloc[i]); 97 errloc[i]);
98 } 98 }
99 } else if (count < 0) { 99 } else if (count < 0) {
100 printk(KERN_ERR "ecc unrecoverable error\n"); 100 printk(KERN_ERR "ecc unrecoverable error\n");
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 271b8e735e8f..b7cfe0d37121 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -110,7 +110,7 @@ static const char bitsperbyte[256] = {
110 110
111/* 111/*
112 * addressbits is a lookup table to filter out the bits from the xor-ed 112 * addressbits is a lookup table to filter out the bits from the xor-ed
113 * ecc data that identify the faulty location. 113 * ECC data that identify the faulty location.
114 * this is only used for repairing parity 114 * this is only used for repairing parity
115 * see the comments in nand_correct_data for more details 115 * see the comments in nand_correct_data for more details
116 */ 116 */
@@ -153,7 +153,7 @@ static const char addressbits[256] = {
153 * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte 153 * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
154 * block 154 * block
155 * @buf: input buffer with raw data 155 * @buf: input buffer with raw data
156 * @eccsize: data bytes per ecc step (256 or 512) 156 * @eccsize: data bytes per ECC step (256 or 512)
157 * @code: output buffer with ECC 157 * @code: output buffer with ECC
158 */ 158 */
159void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, 159void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
@@ -348,7 +348,7 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
348 rp17 = (par ^ rp16) & 0xff; 348 rp17 = (par ^ rp16) & 0xff;
349 349
350 /* 350 /*
351 * Finally calculate the ecc bits. 351 * Finally calculate the ECC bits.
352 * Again here it might seem that there are performance optimisations 352 * Again here it might seem that there are performance optimisations
353 * possible, but benchmarks showed that on the system this is developed 353 * possible, but benchmarks showed that on the system this is developed
354 * the code below is the fastest 354 * the code below is the fastest
@@ -436,7 +436,7 @@ EXPORT_SYMBOL(nand_calculate_ecc);
436 * @buf: raw data read from the chip 436 * @buf: raw data read from the chip
437 * @read_ecc: ECC from the chip 437 * @read_ecc: ECC from the chip
438 * @calc_ecc: the ECC calculated from raw data 438 * @calc_ecc: the ECC calculated from raw data
439 * @eccsize: data bytes per ecc step (256 or 512) 439 * @eccsize: data bytes per ECC step (256 or 512)
440 * 440 *
441 * Detect and correct a 1 bit error for eccsize byte block 441 * Detect and correct a 1 bit error for eccsize byte block
442 */ 442 */
@@ -505,7 +505,7 @@ int __nand_correct_data(unsigned char *buf,
505 } 505 }
506 /* count nr of bits; use table lookup, faster than calculating it */ 506 /* count nr of bits; use table lookup, faster than calculating it */
507 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1) 507 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
508 return 1; /* error in ecc data; no action needed */ 508 return 1; /* error in ECC data; no action needed */
509 509
510 printk(KERN_ERR "uncorrectable error : "); 510 printk(KERN_ERR "uncorrectable error : ");
511 return -1; 511 return -1;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 357e8c5252a8..34c03be77301 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -2273,9 +2273,9 @@ static int __init ns_init_module(void)
2273 2273
2274 switch (bbt) { 2274 switch (bbt) {
2275 case 2: 2275 case 2:
2276 chip->options |= NAND_USE_FLASH_BBT_NO_OOB; 2276 chip->bbt_options |= NAND_BBT_NO_OOB;
2277 case 1: 2277 case 1:
2278 chip->options |= NAND_USE_FLASH_BBT; 2278 chip->bbt_options |= NAND_BBT_USE_FLASH;
2279 case 0: 2279 case 0:
2280 break; 2280 break;
2281 default: 2281 default:
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ea2dea8a9c88..ee1713907b92 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -42,7 +42,6 @@ struct ndfc_controller {
42 struct nand_chip chip; 42 struct nand_chip chip;
43 int chip_select; 43 int chip_select;
44 struct nand_hw_control ndfc_control; 44 struct nand_hw_control ndfc_control;
45 struct mtd_partition *parts;
46}; 45};
47 46
48static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS]; 47static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
@@ -159,13 +158,9 @@ static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
159static int ndfc_chip_init(struct ndfc_controller *ndfc, 158static int ndfc_chip_init(struct ndfc_controller *ndfc,
160 struct device_node *node) 159 struct device_node *node)
161{ 160{
162#ifdef CONFIG_MTD_CMDLINE_PARTS
163 static const char *part_types[] = { "cmdlinepart", NULL };
164#else
165 static const char *part_types[] = { NULL };
166#endif
167 struct device_node *flash_np; 161 struct device_node *flash_np;
168 struct nand_chip *chip = &ndfc->chip; 162 struct nand_chip *chip = &ndfc->chip;
163 struct mtd_part_parser_data ppdata;
169 int ret; 164 int ret;
170 165
171 chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA; 166 chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
@@ -193,6 +188,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
193 if (!flash_np) 188 if (!flash_np)
194 return -ENODEV; 189 return -ENODEV;
195 190
191 ppdata->of_node = flash_np;
196 ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s", 192 ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
197 dev_name(&ndfc->ofdev->dev), flash_np->name); 193 dev_name(&ndfc->ofdev->dev), flash_np->name);
198 if (!ndfc->mtd.name) { 194 if (!ndfc->mtd.name) {
@@ -204,18 +200,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
204 if (ret) 200 if (ret)
205 goto err; 201 goto err;
206 202
207 ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0); 203 ret = mtd_device_parse_register(&ndfc->mtd, NULL, &ppdata, NULL, 0);
208 if (ret < 0)
209 goto err;
210
211 if (ret == 0) {
212 ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np,
213 &ndfc->parts);
214 if (ret < 0)
215 goto err;
216 }
217
218 ret = mtd_device_register(&ndfc->mtd, ndfc->parts, ret);
219 204
220err: 205err:
221 of_node_put(flash_np); 206 of_node_put(flash_np);
@@ -288,6 +273,7 @@ static int __devexit ndfc_remove(struct platform_device *ofdev)
288 struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); 273 struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
289 274
290 nand_release(&ndfc->mtd); 275 nand_release(&ndfc->mtd);
276 kfree(ndfc->mtd.name);
291 277
292 return 0; 278 return 0;
293} 279}
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index b6a5c86ab31e..b463ecfb4c1a 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -187,6 +187,7 @@ static int nomadik_nand_remove(struct platform_device *pdev)
187 pdata->exit(); 187 pdata->exit();
188 188
189 if (host) { 189 if (host) {
190 nand_release(&host->mtd);
190 iounmap(host->cmd_va); 191 iounmap(host->cmd_va);
191 iounmap(host->data_va); 192 iounmap(host->data_va);
192 iounmap(host->addr_va); 193 iounmap(host->addr_va);
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 9c30a0b03171..fa8faedfad6e 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -339,6 +339,7 @@ static int __devexit nuc900_nand_remove(struct platform_device *pdev)
339 struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); 339 struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
340 struct resource *res; 340 struct resource *res;
341 341
342 nand_release(&nuc900_nand->mtd);
342 iounmap(nuc900_nand->reg); 343 iounmap(nuc900_nand->reg);
343 344
344 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 345 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index ec22a5aab038..f745f00f3167 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -95,8 +95,6 @@
95#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0) 95#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
96#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1) 96#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
97 97
98static const char *part_probes[] = { "cmdlinepart", NULL };
99
100/* oob info generated runtime depending on ecc algorithm and layout selected */ 98/* oob info generated runtime depending on ecc algorithm and layout selected */
101static struct nand_ecclayout omap_oobinfo; 99static struct nand_ecclayout omap_oobinfo;
102/* Define some generic bad / good block scan pattern which are used 100/* Define some generic bad / good block scan pattern which are used
@@ -115,7 +113,6 @@ struct omap_nand_info {
115 struct nand_hw_control controller; 113 struct nand_hw_control controller;
116 struct omap_nand_platform_data *pdata; 114 struct omap_nand_platform_data *pdata;
117 struct mtd_info mtd; 115 struct mtd_info mtd;
118 struct mtd_partition *parts;
119 struct nand_chip nand; 116 struct nand_chip nand;
120 struct platform_device *pdev; 117 struct platform_device *pdev;
121 118
@@ -745,12 +742,12 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
745 742
746 case 1: 743 case 1:
747 /* Uncorrectable error */ 744 /* Uncorrectable error */
748 DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n"); 745 pr_debug("ECC UNCORRECTED_ERROR 1\n");
749 return -1; 746 return -1;
750 747
751 case 11: 748 case 11:
752 /* UN-Correctable error */ 749 /* UN-Correctable error */
753 DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n"); 750 pr_debug("ECC UNCORRECTED_ERROR B\n");
754 return -1; 751 return -1;
755 752
756 case 12: 753 case 12:
@@ -767,8 +764,8 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
767 764
768 find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1]; 765 find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
769 766
770 DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at " 767 pr_debug("Correcting single bit ECC error at offset: "
771 "offset: %d, bit: %d\n", find_byte, find_bit); 768 "%d, bit: %d\n", find_byte, find_bit);
772 769
773 page_data[find_byte] ^= (1 << find_bit); 770 page_data[find_byte] ^= (1 << find_bit);
774 771
@@ -780,7 +777,7 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
780 ecc_data2[2] == 0) 777 ecc_data2[2] == 0)
781 return 0; 778 return 0;
782 } 779 }
783 DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n"); 780 pr_debug("UNCORRECTED_ERROR default\n");
784 return -1; 781 return -1;
785 } 782 }
786} 783}
@@ -1104,13 +1101,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1104 goto out_release_mem_region; 1101 goto out_release_mem_region;
1105 } 1102 }
1106 1103
1107 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); 1104 mtd_device_parse_register(&info->mtd, NULL, 0,
1108 if (err > 0) 1105 pdata->parts, pdata->nr_parts);
1109 mtd_device_register(&info->mtd, info->parts, err);
1110 else if (pdata->parts)
1111 mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
1112 else
1113 mtd_device_register(&info->mtd, NULL, 0);
1114 1106
1115 platform_set_drvdata(pdev, &info->mtd); 1107 platform_set_drvdata(pdev, &info->mtd);
1116 1108
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 7794d0680f91..29f505adaf84 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -21,8 +21,6 @@
21#include <mach/hardware.h> 21#include <mach/hardware.h>
22#include <plat/orion_nand.h> 22#include <plat/orion_nand.h>
23 23
24static const char *part_probes[] = { "cmdlinepart", NULL };
25
26static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 24static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
27{ 25{
28 struct nand_chip *nc = mtd->priv; 26 struct nand_chip *nc = mtd->priv;
@@ -81,8 +79,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
81 struct resource *res; 79 struct resource *res;
82 void __iomem *io_base; 80 void __iomem *io_base;
83 int ret = 0; 81 int ret = 0;
84 struct mtd_partition *partitions = NULL;
85 int num_part = 0;
86 82
87 nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL); 83 nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
88 if (!nc) { 84 if (!nc) {
@@ -132,17 +128,9 @@ static int __init orion_nand_probe(struct platform_device *pdev)
132 goto no_dev; 128 goto no_dev;
133 } 129 }
134 130
135#ifdef CONFIG_MTD_CMDLINE_PARTS
136 mtd->name = "orion_nand"; 131 mtd->name = "orion_nand";
137 num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0); 132 ret = mtd_device_parse_register(mtd, NULL, 0,
138#endif 133 board->parts, board->nr_parts);
139 /* If cmdline partitions have been passed, let them be used */
140 if (num_part <= 0) {
141 num_part = board->nr_parts;
142 partitions = board->parts;
143 }
144
145 ret = mtd_device_register(mtd, partitions, num_part);
146 if (ret) { 134 if (ret) {
147 nand_release(mtd); 135 nand_release(mtd);
148 goto no_dev; 136 goto no_dev;
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index b1aa41b8a4eb..a97264ececdb 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -155,7 +155,8 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
155 chip->ecc.mode = NAND_ECC_SOFT; 155 chip->ecc.mode = NAND_ECC_SOFT;
156 156
157 /* Enable the following for a flash based bad block table */ 157 /* Enable the following for a flash based bad block table */
158 chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR; 158 chip->options = NAND_NO_AUTOINCR;
159 chip->bbt_options = NAND_BBT_USE_FLASH;
159 160
160 /* Scan to find existence of the device */ 161 /* Scan to find existence of the device */
161 if (nand_scan(pasemi_nand_mtd, 1)) { 162 if (nand_scan(pasemi_nand_mtd, 1)) {
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 633c04bf76f6..ea8e1234e0e2 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -21,8 +21,6 @@ struct plat_nand_data {
21 struct nand_chip chip; 21 struct nand_chip chip;
22 struct mtd_info mtd; 22 struct mtd_info mtd;
23 void __iomem *io_base; 23 void __iomem *io_base;
24 int nr_parts;
25 struct mtd_partition *parts;
26}; 24};
27 25
28/* 26/*
@@ -79,6 +77,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
79 data->chip.read_buf = pdata->ctrl.read_buf; 77 data->chip.read_buf = pdata->ctrl.read_buf;
80 data->chip.chip_delay = pdata->chip.chip_delay; 78 data->chip.chip_delay = pdata->chip.chip_delay;
81 data->chip.options |= pdata->chip.options; 79 data->chip.options |= pdata->chip.options;
80 data->chip.bbt_options |= pdata->chip.bbt_options;
82 81
83 data->chip.ecc.hwctl = pdata->ctrl.hwcontrol; 82 data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
84 data->chip.ecc.layout = pdata->chip.ecclayout; 83 data->chip.ecc.layout = pdata->chip.ecclayout;
@@ -99,23 +98,9 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
99 goto out; 98 goto out;
100 } 99 }
101 100
102 if (pdata->chip.part_probe_types) { 101 err = mtd_device_parse_register(&data->mtd,
103 err = parse_mtd_partitions(&data->mtd, 102 pdata->chip.part_probe_types, 0,
104 pdata->chip.part_probe_types, 103 pdata->chip.partitions, pdata->chip.nr_partitions);
105 &data->parts, 0);
106 if (err > 0) {
107 mtd_device_register(&data->mtd, data->parts, err);
108 return 0;
109 }
110 }
111 if (pdata->chip.set_parts)
112 pdata->chip.set_parts(data->mtd.size, &pdata->chip);
113 if (pdata->chip.partitions) {
114 data->parts = pdata->chip.partitions;
115 err = mtd_device_register(&data->mtd, data->parts,
116 pdata->chip.nr_partitions);
117 } else
118 err = mtd_device_register(&data->mtd, NULL, 0);
119 104
120 if (!err) 105 if (!err)
121 return err; 106 return err;
@@ -145,8 +130,6 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
145 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 130 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
146 131
147 nand_release(&data->mtd); 132 nand_release(&data->mtd);
148 if (data->parts && data->parts != pdata->chip.partitions)
149 kfree(data->parts);
150 if (pdata->ctrl.remove) 133 if (pdata->ctrl.remove)
151 pdata->ctrl.remove(pdev); 134 pdata->ctrl.remove(pdev);
152 iounmap(data->io_base); 135 iounmap(data->io_base);
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index 3bbb796b451c..7e52af51a198 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -99,8 +99,6 @@ static struct mtd_partition partition_info_evb[] = {
99 99
100#define NUM_PARTITIONS 1 100#define NUM_PARTITIONS 1
101 101
102extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id);
103
104/* 102/*
105 * hardware specific access to control-lines 103 * hardware specific access to control-lines
106 */ 104 */
@@ -187,18 +185,12 @@ static int ppchameleonevb_device_ready(struct mtd_info *minfo)
187} 185}
188#endif 186#endif
189 187
190const char *part_probes[] = { "cmdlinepart", NULL };
191const char *part_probes_evb[] = { "cmdlinepart", NULL };
192
193/* 188/*
194 * Main initialization routine 189 * Main initialization routine
195 */ 190 */
196static int __init ppchameleonevb_init(void) 191static int __init ppchameleonevb_init(void)
197{ 192{
198 struct nand_chip *this; 193 struct nand_chip *this;
199 const char *part_type = 0;
200 int mtd_parts_nb = 0;
201 struct mtd_partition *mtd_parts = 0;
202 void __iomem *ppchameleon_fio_base; 194 void __iomem *ppchameleon_fio_base;
203 void __iomem *ppchameleonevb_fio_base; 195 void __iomem *ppchameleonevb_fio_base;
204 196
@@ -281,24 +273,13 @@ static int __init ppchameleonevb_init(void)
281#endif 273#endif
282 274
283 ppchameleon_mtd->name = "ppchameleon-nand"; 275 ppchameleon_mtd->name = "ppchameleon-nand";
284 mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
285 if (mtd_parts_nb > 0)
286 part_type = "command line";
287 else
288 mtd_parts_nb = 0;
289
290 if (mtd_parts_nb == 0) {
291 if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
292 mtd_parts = partition_info_me;
293 else
294 mtd_parts = partition_info_hi;
295 mtd_parts_nb = NUM_PARTITIONS;
296 part_type = "static";
297 }
298 276
299 /* Register the partitions */ 277 /* Register the partitions */
300 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 278 mtd_device_parse_register(ppchameleon_mtd, NULL, 0,
301 mtd_device_register(ppchameleon_mtd, mtd_parts, mtd_parts_nb); 279 ppchameleon_mtd->size == NAND_SMALL_SIZE ?
280 partition_info_me :
281 partition_info_hi,
282 NUM_PARTITIONS);
302 283
303 nand_evb_init: 284 nand_evb_init:
304 /**************************** 285 /****************************
@@ -382,21 +363,13 @@ static int __init ppchameleonevb_init(void)
382 } 363 }
383 364
384 ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME; 365 ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
385 mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
386 if (mtd_parts_nb > 0)
387 part_type = "command line";
388 else
389 mtd_parts_nb = 0;
390
391 if (mtd_parts_nb == 0) {
392 mtd_parts = partition_info_evb;
393 mtd_parts_nb = NUM_PARTITIONS;
394 part_type = "static";
395 }
396 366
397 /* Register the partitions */ 367 /* Register the partitions */
398 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 368 mtd_device_parse_register(ppchameleonevb_mtd, NULL, 0,
399 mtd_device_register(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb); 369 ppchameleon_mtd->size == NAND_SMALL_SIZE ?
370 partition_info_me :
371 partition_info_hi,
372 NUM_PARTITIONS);
400 373
401 /* Return happy */ 374 /* Return happy */
402 return 0; 375 return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 1fb3b3a80581..9eb7f879969e 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -110,6 +110,7 @@ enum {
110 110
111enum { 111enum {
112 STATE_IDLE = 0, 112 STATE_IDLE = 0,
113 STATE_PREPARED,
113 STATE_CMD_HANDLE, 114 STATE_CMD_HANDLE,
114 STATE_DMA_READING, 115 STATE_DMA_READING,
115 STATE_DMA_WRITING, 116 STATE_DMA_WRITING,
@@ -120,21 +121,40 @@ enum {
120 STATE_READY, 121 STATE_READY,
121}; 122};
122 123
123struct pxa3xx_nand_info { 124struct pxa3xx_nand_host {
124 struct nand_chip nand_chip; 125 struct nand_chip chip;
126 struct pxa3xx_nand_cmdset *cmdset;
127 struct mtd_info *mtd;
128 void *info_data;
129
130 /* page size of attached chip */
131 unsigned int page_size;
132 int use_ecc;
133 int cs;
125 134
135 /* calculated from pxa3xx_nand_flash data */
136 unsigned int col_addr_cycles;
137 unsigned int row_addr_cycles;
138 size_t read_id_bytes;
139
140 /* cached register value */
141 uint32_t reg_ndcr;
142 uint32_t ndtr0cs0;
143 uint32_t ndtr1cs0;
144};
145
146struct pxa3xx_nand_info {
126 struct nand_hw_control controller; 147 struct nand_hw_control controller;
127 struct platform_device *pdev; 148 struct platform_device *pdev;
128 struct pxa3xx_nand_cmdset *cmdset;
129 149
130 struct clk *clk; 150 struct clk *clk;
131 void __iomem *mmio_base; 151 void __iomem *mmio_base;
132 unsigned long mmio_phys; 152 unsigned long mmio_phys;
153 struct completion cmd_complete;
133 154
134 unsigned int buf_start; 155 unsigned int buf_start;
135 unsigned int buf_count; 156 unsigned int buf_count;
136 157
137 struct mtd_info *mtd;
138 /* DMA information */ 158 /* DMA information */
139 int drcmr_dat; 159 int drcmr_dat;
140 int drcmr_cmd; 160 int drcmr_cmd;
@@ -142,44 +162,27 @@ struct pxa3xx_nand_info {
142 unsigned char *data_buff; 162 unsigned char *data_buff;
143 unsigned char *oob_buff; 163 unsigned char *oob_buff;
144 dma_addr_t data_buff_phys; 164 dma_addr_t data_buff_phys;
145 size_t data_buff_size;
146 int data_dma_ch; 165 int data_dma_ch;
147 struct pxa_dma_desc *data_desc; 166 struct pxa_dma_desc *data_desc;
148 dma_addr_t data_desc_addr; 167 dma_addr_t data_desc_addr;
149 168
150 uint32_t reg_ndcr; 169 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
151
152 /* saved column/page_addr during CMD_SEQIN */
153 int seqin_column;
154 int seqin_page_addr;
155
156 /* relate to the command */
157 unsigned int state; 170 unsigned int state;
158 171
172 int cs;
159 int use_ecc; /* use HW ECC ? */ 173 int use_ecc; /* use HW ECC ? */
160 int use_dma; /* use DMA ? */ 174 int use_dma; /* use DMA ? */
161 int is_ready; 175 int is_ready;
162 176
163 unsigned int page_size; /* page size of attached chip */ 177 unsigned int page_size; /* page size of attached chip */
164 unsigned int data_size; /* data size in FIFO */ 178 unsigned int data_size; /* data size in FIFO */
179 unsigned int oob_size;
165 int retcode; 180 int retcode;
166 struct completion cmd_complete;
167 181
168 /* generated NDCBx register values */ 182 /* generated NDCBx register values */
169 uint32_t ndcb0; 183 uint32_t ndcb0;
170 uint32_t ndcb1; 184 uint32_t ndcb1;
171 uint32_t ndcb2; 185 uint32_t ndcb2;
172
173 /* timing calcuted from setting */
174 uint32_t ndtr0cs0;
175 uint32_t ndtr1cs0;
176
177 /* calculated from pxa3xx_nand_flash data */
178 size_t oob_size;
179 size_t read_id_bytes;
180
181 unsigned int col_addr_cycles;
182 unsigned int row_addr_cycles;
183}; 186};
184 187
185static int use_dma = 1; 188static int use_dma = 1;
@@ -225,7 +228,7 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
225/* Define a default flash type setting serve as flash detecting only */ 228/* Define a default flash type setting serve as flash detecting only */
226#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) 229#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
227 230
228const char *mtd_names[] = {"pxa3xx_nand-0", NULL}; 231const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL};
229 232
230#define NDTR0_tCH(c) (min((c), 7) << 19) 233#define NDTR0_tCH(c) (min((c), 7) << 19)
231#define NDTR0_tCS(c) (min((c), 7) << 16) 234#define NDTR0_tCS(c) (min((c), 7) << 16)
@@ -241,9 +244,10 @@ const char *mtd_names[] = {"pxa3xx_nand-0", NULL};
241/* convert nano-seconds to nand flash controller clock cycles */ 244/* convert nano-seconds to nand flash controller clock cycles */
242#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) 245#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
243 246
244static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, 247static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
245 const struct pxa3xx_nand_timing *t) 248 const struct pxa3xx_nand_timing *t)
246{ 249{
250 struct pxa3xx_nand_info *info = host->info_data;
247 unsigned long nand_clk = clk_get_rate(info->clk); 251 unsigned long nand_clk = clk_get_rate(info->clk);
248 uint32_t ndtr0, ndtr1; 252 uint32_t ndtr0, ndtr1;
249 253
@@ -258,23 +262,24 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
258 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) | 262 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
259 NDTR1_tAR(ns2cycle(t->tAR, nand_clk)); 263 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
260 264
261 info->ndtr0cs0 = ndtr0; 265 host->ndtr0cs0 = ndtr0;
262 info->ndtr1cs0 = ndtr1; 266 host->ndtr1cs0 = ndtr1;
263 nand_writel(info, NDTR0CS0, ndtr0); 267 nand_writel(info, NDTR0CS0, ndtr0);
264 nand_writel(info, NDTR1CS0, ndtr1); 268 nand_writel(info, NDTR1CS0, ndtr1);
265} 269}
266 270
267static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) 271static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
268{ 272{
269 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN; 273 struct pxa3xx_nand_host *host = info->host[info->cs];
274 int oob_enable = host->reg_ndcr & NDCR_SPARE_EN;
270 275
271 info->data_size = info->page_size; 276 info->data_size = host->page_size;
272 if (!oob_enable) { 277 if (!oob_enable) {
273 info->oob_size = 0; 278 info->oob_size = 0;
274 return; 279 return;
275 } 280 }
276 281
277 switch (info->page_size) { 282 switch (host->page_size) {
278 case 2048: 283 case 2048:
279 info->oob_size = (info->use_ecc) ? 40 : 64; 284 info->oob_size = (info->use_ecc) ? 40 : 64;
280 break; 285 break;
@@ -292,9 +297,10 @@ static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
292 */ 297 */
293static void pxa3xx_nand_start(struct pxa3xx_nand_info *info) 298static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
294{ 299{
300 struct pxa3xx_nand_host *host = info->host[info->cs];
295 uint32_t ndcr; 301 uint32_t ndcr;
296 302
297 ndcr = info->reg_ndcr; 303 ndcr = host->reg_ndcr;
298 ndcr |= info->use_ecc ? NDCR_ECC_EN : 0; 304 ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
299 ndcr |= info->use_dma ? NDCR_DMA_EN : 0; 305 ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
300 ndcr |= NDCR_ND_RUN; 306 ndcr |= NDCR_ND_RUN;
@@ -359,7 +365,7 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
359 DIV_ROUND_UP(info->oob_size, 4)); 365 DIV_ROUND_UP(info->oob_size, 4));
360 break; 366 break;
361 default: 367 default:
362 printk(KERN_ERR "%s: invalid state %d\n", __func__, 368 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
363 info->state); 369 info->state);
364 BUG(); 370 BUG();
365 } 371 }
@@ -385,7 +391,7 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
385 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; 391 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
386 break; 392 break;
387 default: 393 default:
388 printk(KERN_ERR "%s: invalid state %d\n", __func__, 394 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
389 info->state); 395 info->state);
390 BUG(); 396 BUG();
391 } 397 }
@@ -416,6 +422,15 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
416{ 422{
417 struct pxa3xx_nand_info *info = devid; 423 struct pxa3xx_nand_info *info = devid;
418 unsigned int status, is_completed = 0; 424 unsigned int status, is_completed = 0;
425 unsigned int ready, cmd_done;
426
427 if (info->cs == 0) {
428 ready = NDSR_FLASH_RDY;
429 cmd_done = NDSR_CS0_CMDD;
430 } else {
431 ready = NDSR_RDY;
432 cmd_done = NDSR_CS1_CMDD;
433 }
419 434
420 status = nand_readl(info, NDSR); 435 status = nand_readl(info, NDSR);
421 436
@@ -437,11 +452,11 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
437 handle_data_pio(info); 452 handle_data_pio(info);
438 } 453 }
439 } 454 }
440 if (status & NDSR_CS0_CMDD) { 455 if (status & cmd_done) {
441 info->state = STATE_CMD_DONE; 456 info->state = STATE_CMD_DONE;
442 is_completed = 1; 457 is_completed = 1;
443 } 458 }
444 if (status & NDSR_FLASH_RDY) { 459 if (status & ready) {
445 info->is_ready = 1; 460 info->is_ready = 1;
446 info->state = STATE_READY; 461 info->state = STATE_READY;
447 } 462 }
@@ -463,12 +478,6 @@ NORMAL_IRQ_EXIT:
463 return IRQ_HANDLED; 478 return IRQ_HANDLED;
464} 479}
465 480
466static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
467{
468 struct pxa3xx_nand_info *info = mtd->priv;
469 return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
470}
471
472static inline int is_buf_blank(uint8_t *buf, size_t len) 481static inline int is_buf_blank(uint8_t *buf, size_t len)
473{ 482{
474 for (; len > 0; len--) 483 for (; len > 0; len--)
@@ -481,10 +490,12 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
481 uint16_t column, int page_addr) 490 uint16_t column, int page_addr)
482{ 491{
483 uint16_t cmd; 492 uint16_t cmd;
484 int addr_cycle, exec_cmd, ndcb0; 493 int addr_cycle, exec_cmd;
485 struct mtd_info *mtd = info->mtd; 494 struct pxa3xx_nand_host *host;
495 struct mtd_info *mtd;
486 496
487 ndcb0 = 0; 497 host = info->host[info->cs];
498 mtd = host->mtd;
488 addr_cycle = 0; 499 addr_cycle = 0;
489 exec_cmd = 1; 500 exec_cmd = 1;
490 501
@@ -495,6 +506,10 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
495 info->use_ecc = 0; 506 info->use_ecc = 0;
496 info->is_ready = 0; 507 info->is_ready = 0;
497 info->retcode = ERR_NONE; 508 info->retcode = ERR_NONE;
509 if (info->cs != 0)
510 info->ndcb0 = NDCB0_CSEL;
511 else
512 info->ndcb0 = 0;
498 513
499 switch (command) { 514 switch (command) {
500 case NAND_CMD_READ0: 515 case NAND_CMD_READ0:
@@ -512,20 +527,19 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
512 break; 527 break;
513 } 528 }
514 529
515 info->ndcb0 = ndcb0; 530 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
516 addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles 531 + host->col_addr_cycles);
517 + info->col_addr_cycles);
518 532
519 switch (command) { 533 switch (command) {
520 case NAND_CMD_READOOB: 534 case NAND_CMD_READOOB:
521 case NAND_CMD_READ0: 535 case NAND_CMD_READ0:
522 cmd = info->cmdset->read1; 536 cmd = host->cmdset->read1;
523 if (command == NAND_CMD_READOOB) 537 if (command == NAND_CMD_READOOB)
524 info->buf_start = mtd->writesize + column; 538 info->buf_start = mtd->writesize + column;
525 else 539 else
526 info->buf_start = column; 540 info->buf_start = column;
527 541
528 if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) 542 if (unlikely(host->page_size < PAGE_CHUNK_SIZE))
529 info->ndcb0 |= NDCB0_CMD_TYPE(0) 543 info->ndcb0 |= NDCB0_CMD_TYPE(0)
530 | addr_cycle 544 | addr_cycle
531 | (cmd & NDCB0_CMD1_MASK); 545 | (cmd & NDCB0_CMD1_MASK);
@@ -537,7 +551,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
537 551
538 case NAND_CMD_SEQIN: 552 case NAND_CMD_SEQIN:
539 /* small page addr setting */ 553 /* small page addr setting */
540 if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) { 554 if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) {
541 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) 555 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
542 | (column & 0xFF); 556 | (column & 0xFF);
543 557
@@ -564,7 +578,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
564 break; 578 break;
565 } 579 }
566 580
567 cmd = info->cmdset->program; 581 cmd = host->cmdset->program;
568 info->ndcb0 |= NDCB0_CMD_TYPE(0x1) 582 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
569 | NDCB0_AUTO_RS 583 | NDCB0_AUTO_RS
570 | NDCB0_ST_ROW_EN 584 | NDCB0_ST_ROW_EN
@@ -574,8 +588,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
574 break; 588 break;
575 589
576 case NAND_CMD_READID: 590 case NAND_CMD_READID:
577 cmd = info->cmdset->read_id; 591 cmd = host->cmdset->read_id;
578 info->buf_count = info->read_id_bytes; 592 info->buf_count = host->read_id_bytes;
579 info->ndcb0 |= NDCB0_CMD_TYPE(3) 593 info->ndcb0 |= NDCB0_CMD_TYPE(3)
580 | NDCB0_ADDR_CYC(1) 594 | NDCB0_ADDR_CYC(1)
581 | cmd; 595 | cmd;
@@ -583,7 +597,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
583 info->data_size = 8; 597 info->data_size = 8;
584 break; 598 break;
585 case NAND_CMD_STATUS: 599 case NAND_CMD_STATUS:
586 cmd = info->cmdset->read_status; 600 cmd = host->cmdset->read_status;
587 info->buf_count = 1; 601 info->buf_count = 1;
588 info->ndcb0 |= NDCB0_CMD_TYPE(4) 602 info->ndcb0 |= NDCB0_CMD_TYPE(4)
589 | NDCB0_ADDR_CYC(1) 603 | NDCB0_ADDR_CYC(1)
@@ -593,7 +607,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
593 break; 607 break;
594 608
595 case NAND_CMD_ERASE1: 609 case NAND_CMD_ERASE1:
596 cmd = info->cmdset->erase; 610 cmd = host->cmdset->erase;
597 info->ndcb0 |= NDCB0_CMD_TYPE(2) 611 info->ndcb0 |= NDCB0_CMD_TYPE(2)
598 | NDCB0_AUTO_RS 612 | NDCB0_AUTO_RS
599 | NDCB0_ADDR_CYC(3) 613 | NDCB0_ADDR_CYC(3)
@@ -604,7 +618,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
604 618
605 break; 619 break;
606 case NAND_CMD_RESET: 620 case NAND_CMD_RESET:
607 cmd = info->cmdset->reset; 621 cmd = host->cmdset->reset;
608 info->ndcb0 |= NDCB0_CMD_TYPE(5) 622 info->ndcb0 |= NDCB0_CMD_TYPE(5)
609 | cmd; 623 | cmd;
610 624
@@ -616,8 +630,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
616 630
617 default: 631 default:
618 exec_cmd = 0; 632 exec_cmd = 0;
619 printk(KERN_ERR "pxa3xx-nand: non-supported" 633 dev_err(&info->pdev->dev, "non-supported command %x\n",
620 " command %x\n", command); 634 command);
621 break; 635 break;
622 } 636 }
623 637
@@ -627,7 +641,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
627static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, 641static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
628 int column, int page_addr) 642 int column, int page_addr)
629{ 643{
630 struct pxa3xx_nand_info *info = mtd->priv; 644 struct pxa3xx_nand_host *host = mtd->priv;
645 struct pxa3xx_nand_info *info = host->info_data;
631 int ret, exec_cmd; 646 int ret, exec_cmd;
632 647
633 /* 648 /*
@@ -635,9 +650,21 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
635 * "byte" address into a "word" address appropriate 650 * "byte" address into a "word" address appropriate
636 * for indexing a word-oriented device 651 * for indexing a word-oriented device
637 */ 652 */
638 if (info->reg_ndcr & NDCR_DWIDTH_M) 653 if (host->reg_ndcr & NDCR_DWIDTH_M)
639 column /= 2; 654 column /= 2;
640 655
656 /*
657 * There may be different NAND chip hooked to
658 * different chip select, so check whether
659 * chip select has been changed, if yes, reset the timing
660 */
661 if (info->cs != host->cs) {
662 info->cs = host->cs;
663 nand_writel(info, NDTR0CS0, host->ndtr0cs0);
664 nand_writel(info, NDTR1CS0, host->ndtr1cs0);
665 }
666
667 info->state = STATE_PREPARED;
641 exec_cmd = prepare_command_pool(info, command, column, page_addr); 668 exec_cmd = prepare_command_pool(info, command, column, page_addr);
642 if (exec_cmd) { 669 if (exec_cmd) {
643 init_completion(&info->cmd_complete); 670 init_completion(&info->cmd_complete);
@@ -646,12 +673,12 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
646 ret = wait_for_completion_timeout(&info->cmd_complete, 673 ret = wait_for_completion_timeout(&info->cmd_complete,
647 CHIP_DELAY_TIMEOUT); 674 CHIP_DELAY_TIMEOUT);
648 if (!ret) { 675 if (!ret) {
649 printk(KERN_ERR "Wait time out!!!\n"); 676 dev_err(&info->pdev->dev, "Wait time out!!!\n");
650 /* Stop State Machine for next command cycle */ 677 /* Stop State Machine for next command cycle */
651 pxa3xx_nand_stop(info); 678 pxa3xx_nand_stop(info);
652 } 679 }
653 info->state = STATE_IDLE;
654 } 680 }
681 info->state = STATE_IDLE;
655} 682}
656 683
657static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, 684static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
@@ -664,7 +691,8 @@ static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
664static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, 691static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
665 struct nand_chip *chip, uint8_t *buf, int page) 692 struct nand_chip *chip, uint8_t *buf, int page)
666{ 693{
667 struct pxa3xx_nand_info *info = mtd->priv; 694 struct pxa3xx_nand_host *host = mtd->priv;
695 struct pxa3xx_nand_info *info = host->info_data;
668 696
669 chip->read_buf(mtd, buf, mtd->writesize); 697 chip->read_buf(mtd, buf, mtd->writesize);
670 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 698 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -685,6 +713,8 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
685 * OOB, ignore such double bit errors 713 * OOB, ignore such double bit errors
686 */ 714 */
687 if (is_buf_blank(buf, mtd->writesize)) 715 if (is_buf_blank(buf, mtd->writesize))
716 info->retcode = ERR_NONE;
717 else
688 mtd->ecc_stats.failed++; 718 mtd->ecc_stats.failed++;
689 } 719 }
690 720
@@ -693,7 +723,8 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
693 723
694static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) 724static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
695{ 725{
696 struct pxa3xx_nand_info *info = mtd->priv; 726 struct pxa3xx_nand_host *host = mtd->priv;
727 struct pxa3xx_nand_info *info = host->info_data;
697 char retval = 0xFF; 728 char retval = 0xFF;
698 729
699 if (info->buf_start < info->buf_count) 730 if (info->buf_start < info->buf_count)
@@ -705,7 +736,8 @@ static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
705 736
706static u16 pxa3xx_nand_read_word(struct mtd_info *mtd) 737static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
707{ 738{
708 struct pxa3xx_nand_info *info = mtd->priv; 739 struct pxa3xx_nand_host *host = mtd->priv;
740 struct pxa3xx_nand_info *info = host->info_data;
709 u16 retval = 0xFFFF; 741 u16 retval = 0xFFFF;
710 742
711 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) { 743 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
@@ -717,7 +749,8 @@ static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
717 749
718static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 750static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
719{ 751{
720 struct pxa3xx_nand_info *info = mtd->priv; 752 struct pxa3xx_nand_host *host = mtd->priv;
753 struct pxa3xx_nand_info *info = host->info_data;
721 int real_len = min_t(size_t, len, info->buf_count - info->buf_start); 754 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
722 755
723 memcpy(buf, info->data_buff + info->buf_start, real_len); 756 memcpy(buf, info->data_buff + info->buf_start, real_len);
@@ -727,7 +760,8 @@ static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
727static void pxa3xx_nand_write_buf(struct mtd_info *mtd, 760static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
728 const uint8_t *buf, int len) 761 const uint8_t *buf, int len)
729{ 762{
730 struct pxa3xx_nand_info *info = mtd->priv; 763 struct pxa3xx_nand_host *host = mtd->priv;
764 struct pxa3xx_nand_info *info = host->info_data;
731 int real_len = min_t(size_t, len, info->buf_count - info->buf_start); 765 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
732 766
733 memcpy(info->data_buff + info->buf_start, buf, real_len); 767 memcpy(info->data_buff + info->buf_start, buf, real_len);
@@ -747,7 +781,8 @@ static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
747 781
748static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this) 782static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
749{ 783{
750 struct pxa3xx_nand_info *info = mtd->priv; 784 struct pxa3xx_nand_host *host = mtd->priv;
785 struct pxa3xx_nand_info *info = host->info_data;
751 786
752 /* pxa3xx_nand_send_command has waited for command complete */ 787 /* pxa3xx_nand_send_command has waited for command complete */
753 if (this->state == FL_WRITING || this->state == FL_ERASING) { 788 if (this->state == FL_WRITING || this->state == FL_ERASING) {
@@ -770,54 +805,70 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
770{ 805{
771 struct platform_device *pdev = info->pdev; 806 struct platform_device *pdev = info->pdev;
772 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; 807 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
808 struct pxa3xx_nand_host *host = info->host[info->cs];
773 uint32_t ndcr = 0x0; /* enable all interrupts */ 809 uint32_t ndcr = 0x0; /* enable all interrupts */
774 810
775 if (f->page_size != 2048 && f->page_size != 512) 811 if (f->page_size != 2048 && f->page_size != 512) {
812 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
776 return -EINVAL; 813 return -EINVAL;
814 }
777 815
778 if (f->flash_width != 16 && f->flash_width != 8) 816 if (f->flash_width != 16 && f->flash_width != 8) {
817 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
779 return -EINVAL; 818 return -EINVAL;
819 }
780 820
781 /* calculate flash information */ 821 /* calculate flash information */
782 info->cmdset = &default_cmdset; 822 host->cmdset = &default_cmdset;
783 info->page_size = f->page_size; 823 host->page_size = f->page_size;
784 info->read_id_bytes = (f->page_size == 2048) ? 4 : 2; 824 host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
785 825
786 /* calculate addressing information */ 826 /* calculate addressing information */
787 info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1; 827 host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
788 828
789 if (f->num_blocks * f->page_per_block > 65536) 829 if (f->num_blocks * f->page_per_block > 65536)
790 info->row_addr_cycles = 3; 830 host->row_addr_cycles = 3;
791 else 831 else
792 info->row_addr_cycles = 2; 832 host->row_addr_cycles = 2;
793 833
794 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0; 834 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
795 ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0; 835 ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
796 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0; 836 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
797 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0; 837 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
798 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0; 838 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
799 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0; 839 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
800 840
801 ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes); 841 ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
802 ndcr |= NDCR_SPARE_EN; /* enable spare by default */ 842 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
803 843
804 info->reg_ndcr = ndcr; 844 host->reg_ndcr = ndcr;
805 845
806 pxa3xx_nand_set_timing(info, f->timing); 846 pxa3xx_nand_set_timing(host, f->timing);
807 return 0; 847 return 0;
808} 848}
809 849
810static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) 850static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
811{ 851{
852 /*
853 * We set 0 by hard coding here, for we don't support keep_config
854 * when there is more than one chip attached to the controller
855 */
856 struct pxa3xx_nand_host *host = info->host[0];
812 uint32_t ndcr = nand_readl(info, NDCR); 857 uint32_t ndcr = nand_readl(info, NDCR);
813 info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
814 /* set info fields needed to read id */
815 info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
816 info->reg_ndcr = ndcr;
817 info->cmdset = &default_cmdset;
818 858
819 info->ndtr0cs0 = nand_readl(info, NDTR0CS0); 859 if (ndcr & NDCR_PAGE_SZ) {
820 info->ndtr1cs0 = nand_readl(info, NDTR1CS0); 860 host->page_size = 2048;
861 host->read_id_bytes = 4;
862 } else {
863 host->page_size = 512;
864 host->read_id_bytes = 2;
865 }
866
867 host->reg_ndcr = ndcr & ~NDCR_INT_MASK;
868 host->cmdset = &default_cmdset;
869
870 host->ndtr0cs0 = nand_readl(info, NDTR0CS0);
871 host->ndtr1cs0 = nand_readl(info, NDTR1CS0);
821 872
822 return 0; 873 return 0;
823} 874}
@@ -847,7 +898,6 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
847 return -ENOMEM; 898 return -ENOMEM;
848 } 899 }
849 900
850 info->data_buff_size = MAX_BUFF_SIZE;
851 info->data_desc = (void *)info->data_buff + data_desc_offset; 901 info->data_desc = (void *)info->data_buff + data_desc_offset;
852 info->data_desc_addr = info->data_buff_phys + data_desc_offset; 902 info->data_desc_addr = info->data_buff_phys + data_desc_offset;
853 903
@@ -855,7 +905,7 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
855 pxa3xx_nand_data_dma_irq, info); 905 pxa3xx_nand_data_dma_irq, info);
856 if (info->data_dma_ch < 0) { 906 if (info->data_dma_ch < 0) {
857 dev_err(&pdev->dev, "failed to request data dma\n"); 907 dev_err(&pdev->dev, "failed to request data dma\n");
858 dma_free_coherent(&pdev->dev, info->data_buff_size, 908 dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
859 info->data_buff, info->data_buff_phys); 909 info->data_buff, info->data_buff_phys);
860 return info->data_dma_ch; 910 return info->data_dma_ch;
861 } 911 }
@@ -865,24 +915,28 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
865 915
866static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info) 916static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
867{ 917{
868 struct mtd_info *mtd = info->mtd; 918 struct mtd_info *mtd;
869 struct nand_chip *chip = mtd->priv; 919 int ret;
870 920 mtd = info->host[info->cs]->mtd;
871 /* use the common timing to make a try */ 921 /* use the common timing to make a try */
872 pxa3xx_nand_config_flash(info, &builtin_flash_types[0]); 922 ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
873 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0); 923 if (ret)
924 return ret;
925
926 pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
874 if (info->is_ready) 927 if (info->is_ready)
875 return 1;
876 else
877 return 0; 928 return 0;
929
930 return -ENODEV;
878} 931}
879 932
880static int pxa3xx_nand_scan(struct mtd_info *mtd) 933static int pxa3xx_nand_scan(struct mtd_info *mtd)
881{ 934{
882 struct pxa3xx_nand_info *info = mtd->priv; 935 struct pxa3xx_nand_host *host = mtd->priv;
936 struct pxa3xx_nand_info *info = host->info_data;
883 struct platform_device *pdev = info->pdev; 937 struct platform_device *pdev = info->pdev;
884 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; 938 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
885 struct nand_flash_dev pxa3xx_flash_ids[2] = { {NULL,}, {NULL,} }; 939 struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
886 const struct pxa3xx_nand_flash *f = NULL; 940 const struct pxa3xx_nand_flash *f = NULL;
887 struct nand_chip *chip = mtd->priv; 941 struct nand_chip *chip = mtd->priv;
888 uint32_t id = -1; 942 uint32_t id = -1;
@@ -893,22 +947,20 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
893 goto KEEP_CONFIG; 947 goto KEEP_CONFIG;
894 948
895 ret = pxa3xx_nand_sensing(info); 949 ret = pxa3xx_nand_sensing(info);
896 if (!ret) { 950 if (ret) {
897 kfree(mtd); 951 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
898 info->mtd = NULL; 952 info->cs);
899 printk(KERN_INFO "There is no nand chip on cs 0!\n");
900 953
901 return -EINVAL; 954 return ret;
902 } 955 }
903 956
904 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0); 957 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
905 id = *((uint16_t *)(info->data_buff)); 958 id = *((uint16_t *)(info->data_buff));
906 if (id != 0) 959 if (id != 0)
907 printk(KERN_INFO "Detect a flash id %x\n", id); 960 dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
908 else { 961 else {
909 kfree(mtd); 962 dev_warn(&info->pdev->dev,
910 info->mtd = NULL; 963 "Read out ID 0, potential timing set wrong!!\n");
911 printk(KERN_WARNING "Read out ID 0, potential timing set wrong!!\n");
912 964
913 return -EINVAL; 965 return -EINVAL;
914 } 966 }
@@ -926,14 +978,17 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
926 } 978 }
927 979
928 if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) { 980 if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
929 kfree(mtd); 981 dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
930 info->mtd = NULL;
931 printk(KERN_ERR "ERROR!! flash not defined!!!\n");
932 982
933 return -EINVAL; 983 return -EINVAL;
934 } 984 }
935 985
936 pxa3xx_nand_config_flash(info, f); 986 ret = pxa3xx_nand_config_flash(info, f);
987 if (ret) {
988 dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
989 return ret;
990 }
991
937 pxa3xx_flash_ids[0].name = f->name; 992 pxa3xx_flash_ids[0].name = f->name;
938 pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff; 993 pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff;
939 pxa3xx_flash_ids[0].pagesize = f->page_size; 994 pxa3xx_flash_ids[0].pagesize = f->page_size;
@@ -942,62 +997,78 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
942 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block; 997 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
943 if (f->flash_width == 16) 998 if (f->flash_width == 16)
944 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16; 999 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
1000 pxa3xx_flash_ids[1].name = NULL;
1001 def = pxa3xx_flash_ids;
945KEEP_CONFIG: 1002KEEP_CONFIG:
946 if (nand_scan_ident(mtd, 1, pxa3xx_flash_ids)) 1003 chip->ecc.mode = NAND_ECC_HW;
1004 chip->ecc.size = host->page_size;
1005
1006 chip->options = NAND_NO_AUTOINCR;
1007 chip->options |= NAND_NO_READRDY;
1008 if (host->reg_ndcr & NDCR_DWIDTH_M)
1009 chip->options |= NAND_BUSWIDTH_16;
1010
1011 if (nand_scan_ident(mtd, 1, def))
947 return -ENODEV; 1012 return -ENODEV;
948 /* calculate addressing information */ 1013 /* calculate addressing information */
949 info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1; 1014 if (mtd->writesize >= 2048)
1015 host->col_addr_cycles = 2;
1016 else
1017 host->col_addr_cycles = 1;
1018
950 info->oob_buff = info->data_buff + mtd->writesize; 1019 info->oob_buff = info->data_buff + mtd->writesize;
951 if ((mtd->size >> chip->page_shift) > 65536) 1020 if ((mtd->size >> chip->page_shift) > 65536)
952 info->row_addr_cycles = 3; 1021 host->row_addr_cycles = 3;
953 else 1022 else
954 info->row_addr_cycles = 2; 1023 host->row_addr_cycles = 2;
955 mtd->name = mtd_names[0];
956 chip->ecc.mode = NAND_ECC_HW;
957 chip->ecc.size = f->page_size;
958
959 chip->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;
960 chip->options |= NAND_NO_AUTOINCR;
961 chip->options |= NAND_NO_READRDY;
962 1024
1025 mtd->name = mtd_names[0];
963 return nand_scan_tail(mtd); 1026 return nand_scan_tail(mtd);
964} 1027}
965 1028
966static 1029static int alloc_nand_resource(struct platform_device *pdev)
967struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev)
968{ 1030{
1031 struct pxa3xx_nand_platform_data *pdata;
969 struct pxa3xx_nand_info *info; 1032 struct pxa3xx_nand_info *info;
1033 struct pxa3xx_nand_host *host;
970 struct nand_chip *chip; 1034 struct nand_chip *chip;
971 struct mtd_info *mtd; 1035 struct mtd_info *mtd;
972 struct resource *r; 1036 struct resource *r;
973 int ret, irq; 1037 int ret, irq, cs;
974 1038
975 mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info), 1039 pdata = pdev->dev.platform_data;
976 GFP_KERNEL); 1040 info = kzalloc(sizeof(*info) + (sizeof(*mtd) +
977 if (!mtd) { 1041 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1042 if (!info) {
978 dev_err(&pdev->dev, "failed to allocate memory\n"); 1043 dev_err(&pdev->dev, "failed to allocate memory\n");
979 return NULL; 1044 return -ENOMEM;
980 } 1045 }
981 1046
982 info = (struct pxa3xx_nand_info *)(&mtd[1]);
983 chip = (struct nand_chip *)(&mtd[1]);
984 info->pdev = pdev; 1047 info->pdev = pdev;
985 info->mtd = mtd; 1048 for (cs = 0; cs < pdata->num_cs; cs++) {
986 mtd->priv = info; 1049 mtd = (struct mtd_info *)((unsigned int)&info[1] +
987 mtd->owner = THIS_MODULE; 1050 (sizeof(*mtd) + sizeof(*host)) * cs);
988 1051 chip = (struct nand_chip *)(&mtd[1]);
989 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc; 1052 host = (struct pxa3xx_nand_host *)chip;
990 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc; 1053 info->host[cs] = host;
991 chip->controller = &info->controller; 1054 host->mtd = mtd;
992 chip->waitfunc = pxa3xx_nand_waitfunc; 1055 host->cs = cs;
993 chip->select_chip = pxa3xx_nand_select_chip; 1056 host->info_data = info;
994 chip->dev_ready = pxa3xx_nand_dev_ready; 1057 mtd->priv = host;
995 chip->cmdfunc = pxa3xx_nand_cmdfunc; 1058 mtd->owner = THIS_MODULE;
996 chip->read_word = pxa3xx_nand_read_word; 1059
997 chip->read_byte = pxa3xx_nand_read_byte; 1060 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
998 chip->read_buf = pxa3xx_nand_read_buf; 1061 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
999 chip->write_buf = pxa3xx_nand_write_buf; 1062 chip->controller = &info->controller;
1000 chip->verify_buf = pxa3xx_nand_verify_buf; 1063 chip->waitfunc = pxa3xx_nand_waitfunc;
1064 chip->select_chip = pxa3xx_nand_select_chip;
1065 chip->cmdfunc = pxa3xx_nand_cmdfunc;
1066 chip->read_word = pxa3xx_nand_read_word;
1067 chip->read_byte = pxa3xx_nand_read_byte;
1068 chip->read_buf = pxa3xx_nand_read_buf;
1069 chip->write_buf = pxa3xx_nand_write_buf;
1070 chip->verify_buf = pxa3xx_nand_verify_buf;
1071 }
1001 1072
1002 spin_lock_init(&chip->controller->lock); 1073 spin_lock_init(&chip->controller->lock);
1003 init_waitqueue_head(&chip->controller->wq); 1074 init_waitqueue_head(&chip->controller->wq);
@@ -1070,13 +1141,13 @@ struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev)
1070 1141
1071 platform_set_drvdata(pdev, info); 1142 platform_set_drvdata(pdev, info);
1072 1143
1073 return info; 1144 return 0;
1074 1145
1075fail_free_buf: 1146fail_free_buf:
1076 free_irq(irq, info); 1147 free_irq(irq, info);
1077 if (use_dma) { 1148 if (use_dma) {
1078 pxa_free_dma(info->data_dma_ch); 1149 pxa_free_dma(info->data_dma_ch);
1079 dma_free_coherent(&pdev->dev, info->data_buff_size, 1150 dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
1080 info->data_buff, info->data_buff_phys); 1151 info->data_buff, info->data_buff_phys);
1081 } else 1152 } else
1082 kfree(info->data_buff); 1153 kfree(info->data_buff);
@@ -1088,17 +1159,21 @@ fail_put_clk:
1088 clk_disable(info->clk); 1159 clk_disable(info->clk);
1089 clk_put(info->clk); 1160 clk_put(info->clk);
1090fail_free_mtd: 1161fail_free_mtd:
1091 kfree(mtd); 1162 kfree(info);
1092 return NULL; 1163 return ret;
1093} 1164}
1094 1165
1095static int pxa3xx_nand_remove(struct platform_device *pdev) 1166static int pxa3xx_nand_remove(struct platform_device *pdev)
1096{ 1167{
1097 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1168 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1098 struct mtd_info *mtd = info->mtd; 1169 struct pxa3xx_nand_platform_data *pdata;
1099 struct resource *r; 1170 struct resource *r;
1100 int irq; 1171 int irq, cs;
1101 1172
1173 if (!info)
1174 return 0;
1175
1176 pdata = pdev->dev.platform_data;
1102 platform_set_drvdata(pdev, NULL); 1177 platform_set_drvdata(pdev, NULL);
1103 1178
1104 irq = platform_get_irq(pdev, 0); 1179 irq = platform_get_irq(pdev, 0);
@@ -1106,7 +1181,7 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
1106 free_irq(irq, info); 1181 free_irq(irq, info);
1107 if (use_dma) { 1182 if (use_dma) {
1108 pxa_free_dma(info->data_dma_ch); 1183 pxa_free_dma(info->data_dma_ch);
1109 dma_free_writecombine(&pdev->dev, info->data_buff_size, 1184 dma_free_writecombine(&pdev->dev, MAX_BUFF_SIZE,
1110 info->data_buff, info->data_buff_phys); 1185 info->data_buff, info->data_buff_phys);
1111 } else 1186 } else
1112 kfree(info->data_buff); 1187 kfree(info->data_buff);
@@ -1118,10 +1193,9 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
1118 clk_disable(info->clk); 1193 clk_disable(info->clk);
1119 clk_put(info->clk); 1194 clk_put(info->clk);
1120 1195
1121 if (mtd) { 1196 for (cs = 0; cs < pdata->num_cs; cs++)
1122 mtd_device_unregister(mtd); 1197 nand_release(info->host[cs]->mtd);
1123 kfree(mtd); 1198 kfree(info);
1124 }
1125 return 0; 1199 return 0;
1126} 1200}
1127 1201
@@ -1129,6 +1203,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1129{ 1203{
1130 struct pxa3xx_nand_platform_data *pdata; 1204 struct pxa3xx_nand_platform_data *pdata;
1131 struct pxa3xx_nand_info *info; 1205 struct pxa3xx_nand_info *info;
1206 int ret, cs, probe_success;
1132 1207
1133 pdata = pdev->dev.platform_data; 1208 pdata = pdev->dev.platform_data;
1134 if (!pdata) { 1209 if (!pdata) {
@@ -1136,52 +1211,88 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1136 return -ENODEV; 1211 return -ENODEV;
1137 } 1212 }
1138 1213
1139 info = alloc_nand_resource(pdev); 1214 ret = alloc_nand_resource(pdev);
1140 if (info == NULL) 1215 if (ret) {
1141 return -ENOMEM; 1216 dev_err(&pdev->dev, "alloc nand resource failed\n");
1142 1217 return ret;
1143 if (pxa3xx_nand_scan(info->mtd)) {
1144 dev_err(&pdev->dev, "failed to scan nand\n");
1145 pxa3xx_nand_remove(pdev);
1146 return -ENODEV;
1147 } 1218 }
1148 1219
1149 if (mtd_has_cmdlinepart()) { 1220 info = platform_get_drvdata(pdev);
1150 const char *probes[] = { "cmdlinepart", NULL }; 1221 probe_success = 0;
1151 struct mtd_partition *parts; 1222 for (cs = 0; cs < pdata->num_cs; cs++) {
1152 int nr_parts; 1223 info->cs = cs;
1224 ret = pxa3xx_nand_scan(info->host[cs]->mtd);
1225 if (ret) {
1226 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1227 cs);
1228 continue;
1229 }
1153 1230
1154 nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0); 1231 ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, 0,
1232 pdata->parts[cs], pdata->nr_parts[cs]);
1233 if (!ret)
1234 probe_success = 1;
1235 }
1155 1236
1156 if (nr_parts) 1237 if (!probe_success) {
1157 return mtd_device_register(info->mtd, parts, nr_parts); 1238 pxa3xx_nand_remove(pdev);
1239 return -ENODEV;
1158 } 1240 }
1159 1241
1160 return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts); 1242 return 0;
1161} 1243}
1162 1244
1163#ifdef CONFIG_PM 1245#ifdef CONFIG_PM
1164static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) 1246static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1165{ 1247{
1166 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1248 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1167 struct mtd_info *mtd = info->mtd; 1249 struct pxa3xx_nand_platform_data *pdata;
1250 struct mtd_info *mtd;
1251 int cs;
1168 1252
1253 pdata = pdev->dev.platform_data;
1169 if (info->state) { 1254 if (info->state) {
1170 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); 1255 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1171 return -EAGAIN; 1256 return -EAGAIN;
1172 } 1257 }
1173 1258
1259 for (cs = 0; cs < pdata->num_cs; cs++) {
1260 mtd = info->host[cs]->mtd;
1261 mtd->suspend(mtd);
1262 }
1263
1174 return 0; 1264 return 0;
1175} 1265}
1176 1266
1177static int pxa3xx_nand_resume(struct platform_device *pdev) 1267static int pxa3xx_nand_resume(struct platform_device *pdev)
1178{ 1268{
1179 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1269 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1180 struct mtd_info *mtd = info->mtd; 1270 struct pxa3xx_nand_platform_data *pdata;
1271 struct mtd_info *mtd;
1272 int cs;
1181 1273
1182 nand_writel(info, NDTR0CS0, info->ndtr0cs0); 1274 pdata = pdev->dev.platform_data;
1183 nand_writel(info, NDTR1CS0, info->ndtr1cs0); 1275 /* We don't want to handle interrupt without calling mtd routine */
1184 clk_enable(info->clk); 1276 disable_int(info, NDCR_INT_MASK);
1277
1278 /*
1279 * Directly set the chip select to a invalid value,
1280 * then the driver would reset the timing according
1281 * to current chip select at the beginning of cmdfunc
1282 */
1283 info->cs = 0xff;
1284
1285 /*
1286 * As the spec says, the NDSR would be updated to 0x1800 when
1287 * doing the nand_clk disable/enable.
1288 * To prevent it damaging state machine of the driver, clear
1289 * all status before resume
1290 */
1291 nand_writel(info, NDSR, NDSR_MASK);
1292 for (cs = 0; cs < pdata->num_cs; cs++) {
1293 mtd = info->host[cs]->mtd;
1294 mtd->resume(mtd);
1295 }
1185 1296
1186 return 0; 1297 return 0;
1187} 1298}
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index cae2e013c986..f20f393bfda6 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -1027,7 +1027,7 @@ void r852_shutdown(struct pci_dev *pci_dev)
1027} 1027}
1028 1028
1029#ifdef CONFIG_PM 1029#ifdef CONFIG_PM
1030int r852_suspend(struct device *device) 1030static int r852_suspend(struct device *device)
1031{ 1031{
1032 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); 1032 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
1033 1033
@@ -1048,7 +1048,7 @@ int r852_suspend(struct device *device)
1048 return 0; 1048 return 0;
1049} 1049}
1050 1050
1051int r852_resume(struct device *device) 1051static int r852_resume(struct device *device)
1052{ 1052{
1053 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); 1053 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
1054 1054
@@ -1092,7 +1092,7 @@ static const struct pci_device_id r852_pci_id_tbl[] = {
1092 1092
1093MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl); 1093MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
1094 1094
1095SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume); 1095static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
1096 1096
1097static struct pci_driver r852_pci_driver = { 1097static struct pci_driver r852_pci_driver = {
1098 .name = DRV_NAME, 1098 .name = DRV_NAME,
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index c9f9127ff770..f309addc2fa0 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -351,7 +351,7 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha
351 return 0; 351 return 0;
352 } 352 }
353 353
354 /* Read the syndrom pattern from the FPGA and correct the bitorder */ 354 /* Read the syndrome pattern from the FPGA and correct the bitorder */
355 rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC); 355 rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC);
356 for (i = 0; i < 8; i++) { 356 for (i = 0; i < 8; i++) {
357 ecc[i] = bitrev8(*rs_ecc); 357 ecc[i] = bitrev8(*rs_ecc);
@@ -380,7 +380,7 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha
380 /* Let the library code do its magic. */ 380 /* Let the library code do its magic. */
381 res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL); 381 res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL);
382 if (res > 0) { 382 if (res > 0) {
383 DEBUG(MTD_DEBUG_LEVEL0, "rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res); 383 pr_debug("rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res);
384 } 384 }
385 return res; 385 return res;
386} 386}
@@ -444,7 +444,6 @@ static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this,
444 len = mtd->writesize; 444 len = mtd->writesize;
445 buf = kmalloc(len, GFP_KERNEL); 445 buf = kmalloc(len, GFP_KERNEL);
446 if (!buf) { 446 if (!buf) {
447 printk(KERN_ERR "rtc_from4_errstat: Out of memory!\n");
448 er_stat = 1; 447 er_stat = 1;
449 goto out; 448 goto out;
450 } 449 }
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 4405468f196b..868685db6712 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -723,7 +723,7 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
723 723
724 /* free the common resources */ 724 /* free the common resources */
725 725
726 if (info->clk != NULL && !IS_ERR(info->clk)) { 726 if (!IS_ERR(info->clk)) {
727 s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); 727 s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
728 clk_put(info->clk); 728 clk_put(info->clk);
729 } 729 }
@@ -744,26 +744,15 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
744 return 0; 744 return 0;
745} 745}
746 746
747const char *part_probes[] = { "cmdlinepart", NULL };
748static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, 747static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
749 struct s3c2410_nand_mtd *mtd, 748 struct s3c2410_nand_mtd *mtd,
750 struct s3c2410_nand_set *set) 749 struct s3c2410_nand_set *set)
751{ 750{
752 struct mtd_partition *part_info; 751 if (set)
753 int nr_part = 0; 752 mtd->mtd.name = set->name;
754 753
755 if (set == NULL) 754 return mtd_device_parse_register(&mtd->mtd, NULL, 0,
756 return mtd_device_register(&mtd->mtd, NULL, 0); 755 set->partitions, set->nr_partitions);
757
758 mtd->mtd.name = set->name;
759 nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0);
760
761 if (nr_part <= 0 && set->nr_partitions > 0) {
762 nr_part = set->nr_partitions;
763 part_info = set->partitions;
764 }
765
766 return mtd_device_register(&mtd->mtd, part_info, nr_part);
767} 756}
768 757
769/** 758/**
@@ -880,8 +869,10 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
880 /* If you use u-boot BBT creation code, specifying this flag will 869 /* If you use u-boot BBT creation code, specifying this flag will
881 * let the kernel fish out the BBT from the NAND, and also skip the 870 * let the kernel fish out the BBT from the NAND, and also skip the
882 * full NAND scan that can take 1/2s or so. Little things... */ 871 * full NAND scan that can take 1/2s or so. Little things... */
883 if (set->flash_bbt) 872 if (set->flash_bbt) {
884 chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN; 873 chip->bbt_options |= NAND_BBT_USE_FLASH;
874 chip->options |= NAND_SKIP_BBTSCAN;
875 }
885} 876}
886 877
887/** 878/**
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 19e24ed089ea..619d2a504788 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -103,16 +103,12 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat,
103 return readb(sharpsl->io + ECCCNTR) != 0; 103 return readb(sharpsl->io + ECCCNTR) != 0;
104} 104}
105 105
106static const char *part_probes[] = { "cmdlinepart", NULL };
107
108/* 106/*
109 * Main initialization routine 107 * Main initialization routine
110 */ 108 */
111static int __devinit sharpsl_nand_probe(struct platform_device *pdev) 109static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
112{ 110{
113 struct nand_chip *this; 111 struct nand_chip *this;
114 struct mtd_partition *sharpsl_partition_info;
115 int nr_partitions;
116 struct resource *r; 112 struct resource *r;
117 int err = 0; 113 int err = 0;
118 struct sharpsl_nand *sharpsl; 114 struct sharpsl_nand *sharpsl;
@@ -184,14 +180,9 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
184 180
185 /* Register the partitions */ 181 /* Register the partitions */
186 sharpsl->mtd.name = "sharpsl-nand"; 182 sharpsl->mtd.name = "sharpsl-nand";
187 nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
188 if (nr_partitions <= 0) {
189 nr_partitions = data->nr_partitions;
190 sharpsl_partition_info = data->partitions;
191 }
192 183
193 err = mtd_device_register(&sharpsl->mtd, sharpsl_partition_info, 184 err = mtd_device_parse_register(&sharpsl->mtd, NULL, 0,
194 nr_partitions); 185 data->partitions, data->nr_partitions);
195 if (err) 186 if (err)
196 goto err_add; 187 goto err_add;
197 188
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 43469715b3fa..32ae5af7444f 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -48,7 +48,7 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
48 48
49 /* As long as this function is called on erase block boundaries 49 /* As long as this function is called on erase block boundaries
50 it will work correctly for 256 byte nand */ 50 it will work correctly for 256 byte nand */
51 ops.mode = MTD_OOB_PLACE; 51 ops.mode = MTD_OPS_PLACE_OOB;
52 ops.ooboffs = 0; 52 ops.ooboffs = 0;
53 ops.ooblen = mtd->oobsize; 53 ops.ooblen = mtd->oobsize;
54 ops.oobbuf = (void *)&oob; 54 ops.oobbuf = (void *)&oob;
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index ca2d0555729e..0fb24f9c2327 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -155,8 +155,6 @@ static int socrates_nand_device_ready(struct mtd_info *mtd)
155 return 1; 155 return 1;
156} 156}
157 157
158static const char *part_probes[] = { "cmdlinepart", NULL };
159
160/* 158/*
161 * Probe for the NAND device. 159 * Probe for the NAND device.
162 */ 160 */
@@ -166,8 +164,7 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
166 struct mtd_info *mtd; 164 struct mtd_info *mtd;
167 struct nand_chip *nand_chip; 165 struct nand_chip *nand_chip;
168 int res; 166 int res;
169 struct mtd_partition *partitions = NULL; 167 struct mtd_part_parser_data ppdata;
170 int num_partitions = 0;
171 168
172 /* Allocate memory for the device structure (and zero it) */ 169 /* Allocate memory for the device structure (and zero it) */
173 host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL); 170 host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
@@ -193,6 +190,7 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
193 mtd->name = "socrates_nand"; 190 mtd->name = "socrates_nand";
194 mtd->owner = THIS_MODULE; 191 mtd->owner = THIS_MODULE;
195 mtd->dev.parent = &ofdev->dev; 192 mtd->dev.parent = &ofdev->dev;
193 ppdata.of_node = ofdev->dev.of_node;
196 194
197 /*should never be accessed directly */ 195 /*should never be accessed directly */
198 nand_chip->IO_ADDR_R = (void *)0xdeadbeef; 196 nand_chip->IO_ADDR_R = (void *)0xdeadbeef;
@@ -225,30 +223,10 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev)
225 goto out; 223 goto out;
226 } 224 }
227 225
228#ifdef CONFIG_MTD_CMDLINE_PARTS 226 res = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
229 num_partitions = parse_mtd_partitions(mtd, part_probes,
230 &partitions, 0);
231 if (num_partitions < 0) {
232 res = num_partitions;
233 goto release;
234 }
235#endif
236
237 if (num_partitions == 0) {
238 num_partitions = of_mtd_parse_partitions(&ofdev->dev,
239 ofdev->dev.of_node,
240 &partitions);
241 if (num_partitions < 0) {
242 res = num_partitions;
243 goto release;
244 }
245 }
246
247 res = mtd_device_register(mtd, partitions, num_partitions);
248 if (!res) 227 if (!res)
249 return res; 228 return res;
250 229
251release:
252 nand_release(mtd); 230 nand_release(mtd);
253 231
254out: 232out:
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 11e8371b5683..beebd95f7690 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -121,9 +121,6 @@ struct tmio_nand {
121 121
122#define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd) 122#define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd)
123 123
124#ifdef CONFIG_MTD_CMDLINE_PARTS
125static const char *part_probes[] = { "cmdlinepart", NULL };
126#endif
127 124
128/*--------------------------------------------------------------------------*/ 125/*--------------------------------------------------------------------------*/
129 126
@@ -381,8 +378,6 @@ static int tmio_probe(struct platform_device *dev)
381 struct tmio_nand *tmio; 378 struct tmio_nand *tmio;
382 struct mtd_info *mtd; 379 struct mtd_info *mtd;
383 struct nand_chip *nand_chip; 380 struct nand_chip *nand_chip;
384 struct mtd_partition *parts;
385 int nbparts = 0;
386 int retval; 381 int retval;
387 382
388 if (data == NULL) 383 if (data == NULL)
@@ -461,15 +456,9 @@ static int tmio_probe(struct platform_device *dev)
461 goto err_scan; 456 goto err_scan;
462 } 457 }
463 /* Register the partitions */ 458 /* Register the partitions */
464#ifdef CONFIG_MTD_CMDLINE_PARTS 459 retval = mtd_device_parse_register(mtd, NULL, 0,
465 nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0); 460 data ? data->partition : NULL,
466#endif 461 data ? data->num_partitions : 0);
467 if (nbparts <= 0 && data) {
468 parts = data->partition;
469 nbparts = data->num_partitions;
470 }
471
472 retval = mtd_device_register(mtd, parts, nbparts);
473 if (!retval) 462 if (!retval)
474 return retval; 463 return retval;
475 464
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index bfba4e39a6c5..ace46fdaef58 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -74,7 +74,6 @@ struct txx9ndfmc_drvdata {
74 unsigned char hold; /* in gbusclock */ 74 unsigned char hold; /* in gbusclock */
75 unsigned char spw; /* in gbusclock */ 75 unsigned char spw; /* in gbusclock */
76 struct nand_hw_control hw_control; 76 struct nand_hw_control hw_control;
77 struct mtd_partition *parts[MAX_TXX9NDFMC_DEV];
78}; 77};
79 78
80static struct platform_device *mtd_to_platdev(struct mtd_info *mtd) 79static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
@@ -287,7 +286,6 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
287static int __init txx9ndfmc_probe(struct platform_device *dev) 286static int __init txx9ndfmc_probe(struct platform_device *dev)
288{ 287{
289 struct txx9ndfmc_platform_data *plat = dev->dev.platform_data; 288 struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
290 static const char *probes[] = { "cmdlinepart", NULL };
291 int hold, spw; 289 int hold, spw;
292 int i; 290 int i;
293 struct txx9ndfmc_drvdata *drvdata; 291 struct txx9ndfmc_drvdata *drvdata;
@@ -333,7 +331,6 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
333 struct txx9ndfmc_priv *txx9_priv; 331 struct txx9ndfmc_priv *txx9_priv;
334 struct nand_chip *chip; 332 struct nand_chip *chip;
335 struct mtd_info *mtd; 333 struct mtd_info *mtd;
336 int nr_parts;
337 334
338 if (!(plat->ch_mask & (1 << i))) 335 if (!(plat->ch_mask & (1 << i)))
339 continue; 336 continue;
@@ -393,9 +390,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
393 } 390 }
394 mtd->name = txx9_priv->mtdname; 391 mtd->name = txx9_priv->mtdname;
395 392
396 nr_parts = parse_mtd_partitions(mtd, probes, 393 mtd_device_parse_register(mtd, NULL, 0, NULL, 0);
397 &drvdata->parts[i], 0);
398 mtd_device_register(mtd, drvdata->parts[i], nr_parts);
399 drvdata->mtds[i] = mtd; 394 drvdata->mtds[i] = mtd;
400 } 395 }
401 396
@@ -421,7 +416,6 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
421 txx9_priv = chip->priv; 416 txx9_priv = chip->priv;
422 417
423 nand_release(mtd); 418 nand_release(mtd);
424 kfree(drvdata->parts[i]);
425 kfree(txx9_priv->mtdname); 419 kfree(txx9_priv->mtdname);
426 kfree(txx9_priv); 420 kfree(txx9_priv);
427 } 421 }
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index b155666acfbe..cda77b562ad4 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -63,14 +63,12 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
63 return; 63 return;
64 } 64 }
65 65
66 DEBUG(MTD_DEBUG_LEVEL1, "NFTL: add_mtd for %s\n", mtd->name); 66 pr_debug("NFTL: add_mtd for %s\n", mtd->name);
67 67
68 nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL); 68 nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
69 69
70 if (!nftl) { 70 if (!nftl)
71 printk(KERN_WARNING "NFTL: out of memory for data structures\n");
72 return; 71 return;
73 }
74 72
75 nftl->mbd.mtd = mtd; 73 nftl->mbd.mtd = mtd;
76 nftl->mbd.devnum = -1; 74 nftl->mbd.devnum = -1;
@@ -132,7 +130,7 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
132{ 130{
133 struct NFTLrecord *nftl = (void *)dev; 131 struct NFTLrecord *nftl = (void *)dev;
134 132
135 DEBUG(MTD_DEBUG_LEVEL1, "NFTL: remove_dev (i=%d)\n", dev->devnum); 133 pr_debug("NFTL: remove_dev (i=%d)\n", dev->devnum);
136 134
137 del_mtd_blktrans_dev(dev); 135 del_mtd_blktrans_dev(dev);
138 kfree(nftl->ReplUnitTable); 136 kfree(nftl->ReplUnitTable);
@@ -149,7 +147,7 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
149 struct mtd_oob_ops ops; 147 struct mtd_oob_ops ops;
150 int res; 148 int res;
151 149
152 ops.mode = MTD_OOB_PLACE; 150 ops.mode = MTD_OPS_PLACE_OOB;
153 ops.ooboffs = offs & mask; 151 ops.ooboffs = offs & mask;
154 ops.ooblen = len; 152 ops.ooblen = len;
155 ops.oobbuf = buf; 153 ops.oobbuf = buf;
@@ -170,7 +168,7 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
170 struct mtd_oob_ops ops; 168 struct mtd_oob_ops ops;
171 int res; 169 int res;
172 170
173 ops.mode = MTD_OOB_PLACE; 171 ops.mode = MTD_OPS_PLACE_OOB;
174 ops.ooboffs = offs & mask; 172 ops.ooboffs = offs & mask;
175 ops.ooblen = len; 173 ops.ooblen = len;
176 ops.oobbuf = buf; 174 ops.oobbuf = buf;
@@ -193,7 +191,7 @@ static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
193 struct mtd_oob_ops ops; 191 struct mtd_oob_ops ops;
194 int res; 192 int res;
195 193
196 ops.mode = MTD_OOB_PLACE; 194 ops.mode = MTD_OPS_PLACE_OOB;
197 ops.ooboffs = offs & mask; 195 ops.ooboffs = offs & mask;
198 ops.ooblen = mtd->oobsize; 196 ops.ooblen = mtd->oobsize;
199 ops.oobbuf = oob; 197 ops.oobbuf = oob;
@@ -220,7 +218,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
220 218
221 /* Normally, we force a fold to happen before we run out of free blocks completely */ 219 /* Normally, we force a fold to happen before we run out of free blocks completely */
222 if (!desperate && nftl->numfreeEUNs < 2) { 220 if (!desperate && nftl->numfreeEUNs < 2) {
223 DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n"); 221 pr_debug("NFTL_findfreeblock: there are too few free EUNs\n");
224 return BLOCK_NIL; 222 return BLOCK_NIL;
225 } 223 }
226 224
@@ -291,8 +289,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
291 if (block == 2) { 289 if (block == 2) {
292 foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1; 290 foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1;
293 if (foldmark == FOLD_MARK_IN_PROGRESS) { 291 if (foldmark == FOLD_MARK_IN_PROGRESS) {
294 DEBUG(MTD_DEBUG_LEVEL1, 292 pr_debug("Write Inhibited on EUN %d\n", thisEUN);
295 "Write Inhibited on EUN %d\n", thisEUN);
296 inplace = 0; 293 inplace = 0;
297 } else { 294 } else {
298 /* There's no other reason not to do inplace, 295 /* There's no other reason not to do inplace,
@@ -357,7 +354,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
357 if (BlockLastState[block] != SECTOR_FREE && 354 if (BlockLastState[block] != SECTOR_FREE &&
358 BlockMap[block] != BLOCK_NIL && 355 BlockMap[block] != BLOCK_NIL &&
359 BlockMap[block] != targetEUN) { 356 BlockMap[block] != targetEUN) {
360 DEBUG(MTD_DEBUG_LEVEL1, "Setting inplace to 0. VUC %d, " 357 pr_debug("Setting inplace to 0. VUC %d, "
361 "block %d was %x lastEUN, " 358 "block %d was %x lastEUN, "
362 "and is in EUN %d (%s) %d\n", 359 "and is in EUN %d (%s) %d\n",
363 thisVUC, block, BlockLastState[block], 360 thisVUC, block, BlockLastState[block],
@@ -373,14 +370,14 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
373 pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) && 370 pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) &&
374 BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] != 371 BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] !=
375 SECTOR_FREE) { 372 SECTOR_FREE) {
376 DEBUG(MTD_DEBUG_LEVEL1, "Pending write not free in EUN %d. " 373 pr_debug("Pending write not free in EUN %d. "
377 "Folding out of place.\n", targetEUN); 374 "Folding out of place.\n", targetEUN);
378 inplace = 0; 375 inplace = 0;
379 } 376 }
380 } 377 }
381 378
382 if (!inplace) { 379 if (!inplace) {
383 DEBUG(MTD_DEBUG_LEVEL1, "Cannot fold Virtual Unit Chain %d in place. " 380 pr_debug("Cannot fold Virtual Unit Chain %d in place. "
384 "Trying out-of-place\n", thisVUC); 381 "Trying out-of-place\n", thisVUC);
385 /* We need to find a targetEUN to fold into. */ 382 /* We need to find a targetEUN to fold into. */
386 targetEUN = NFTL_findfreeblock(nftl, 1); 383 targetEUN = NFTL_findfreeblock(nftl, 1);
@@ -410,7 +407,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
410 and the Erase Unit into which we are supposed to be copying. 407 and the Erase Unit into which we are supposed to be copying.
411 Go for it. 408 Go for it.
412 */ 409 */
413 DEBUG(MTD_DEBUG_LEVEL1,"Folding chain %d into unit %d\n", thisVUC, targetEUN); 410 pr_debug("Folding chain %d into unit %d\n", thisVUC, targetEUN);
414 for (block = 0; block < nftl->EraseSize / 512 ; block++) { 411 for (block = 0; block < nftl->EraseSize / 512 ; block++) {
415 unsigned char movebuf[512]; 412 unsigned char movebuf[512];
416 int ret; 413 int ret;
@@ -428,7 +425,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
428 425
429 ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512), 426 ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
430 512, &retlen, movebuf); 427 512, &retlen, movebuf);
431 if (ret < 0 && ret != -EUCLEAN) { 428 if (ret < 0 && !mtd_is_bitflip(ret)) {
432 ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) 429 ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block])
433 + (block * 512), 512, &retlen, 430 + (block * 512), 512, &retlen,
434 movebuf); 431 movebuf);
@@ -457,7 +454,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
457 has duplicate chains, we need to free one of the chains because it's not necessary any more. 454 has duplicate chains, we need to free one of the chains because it's not necessary any more.
458 */ 455 */
459 thisEUN = nftl->EUNtable[thisVUC]; 456 thisEUN = nftl->EUNtable[thisVUC];
460 DEBUG(MTD_DEBUG_LEVEL1,"Want to erase\n"); 457 pr_debug("Want to erase\n");
461 458
462 /* For each block in the old chain (except the targetEUN of course), 459 /* For each block in the old chain (except the targetEUN of course),
463 free it and make it available for future use */ 460 free it and make it available for future use */
@@ -570,7 +567,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
570 (writeEUN * nftl->EraseSize) + blockofs, 567 (writeEUN * nftl->EraseSize) + blockofs,
571 8, &retlen, (char *)&bci); 568 8, &retlen, (char *)&bci);
572 569
573 DEBUG(MTD_DEBUG_LEVEL2, "Status of block %d in EUN %d is %x\n", 570 pr_debug("Status of block %d in EUN %d is %x\n",
574 block , writeEUN, le16_to_cpu(bci.Status)); 571 block , writeEUN, le16_to_cpu(bci.Status));
575 572
576 status = bci.Status | bci.Status1; 573 status = bci.Status | bci.Status1;
@@ -623,7 +620,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
623 but they are reserved for when we're 620 but they are reserved for when we're
624 desperate. Well, now we're desperate. 621 desperate. Well, now we're desperate.
625 */ 622 */
626 DEBUG(MTD_DEBUG_LEVEL1, "Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC); 623 pr_debug("Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC);
627 writeEUN = NFTL_findfreeblock(nftl, 1); 624 writeEUN = NFTL_findfreeblock(nftl, 1);
628 } 625 }
629 if (writeEUN == BLOCK_NIL) { 626 if (writeEUN == BLOCK_NIL) {
@@ -776,7 +773,7 @@ static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
776 size_t retlen; 773 size_t retlen;
777 int res = mtd->read(mtd, ptr, 512, &retlen, buffer); 774 int res = mtd->read(mtd, ptr, 512, &retlen, buffer);
778 775
779 if (res < 0 && res != -EUCLEAN) 776 if (res < 0 && !mtd_is_bitflip(res))
780 return -EIO; 777 return -EIO;
781 } 778 }
782 return 0; 779 return 0;
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index e3cd1ffad2f6..ac4092591aea 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -32,7 +32,7 @@
32 32
33/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the 33/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
34 * various device information of the NFTL partition and Bad Unit Table. Update 34 * various device information of the NFTL partition and Bad Unit Table. Update
35 * the ReplUnitTable[] table accroding to the Bad Unit Table. ReplUnitTable[] 35 * the ReplUnitTable[] table according to the Bad Unit Table. ReplUnitTable[]
36 * is used for management of Erase Unit in other routines in nftl.c and nftlmount.c 36 * is used for management of Erase Unit in other routines in nftl.c and nftlmount.c
37 */ 37 */
38static int find_boot_record(struct NFTLrecord *nftl) 38static int find_boot_record(struct NFTLrecord *nftl)
@@ -297,7 +297,7 @@ static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int
297 * 297 *
298 * Return: 0 when succeed, -1 on error. 298 * Return: 0 when succeed, -1 on error.
299 * 299 *
300 * ToDo: 1. Is it neceressary to check_free_sector after erasing ?? 300 * ToDo: 1. Is it necessary to check_free_sector after erasing ??
301 */ 301 */
302int NFTL_formatblock(struct NFTLrecord *nftl, int block) 302int NFTL_formatblock(struct NFTLrecord *nftl, int block)
303{ 303{
@@ -337,7 +337,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
337 nb_erases = le32_to_cpu(uci.WearInfo); 337 nb_erases = le32_to_cpu(uci.WearInfo);
338 nb_erases++; 338 nb_erases++;
339 339
340 /* wrap (almost impossible with current flashs) or free block */ 340 /* wrap (almost impossible with current flash) or free block */
341 if (nb_erases == 0) 341 if (nb_erases == 0)
342 nb_erases = 1; 342 nb_erases = 1;
343 343
@@ -363,10 +363,10 @@ fail:
363 * Mark as 'IGNORE' each incorrect sector. This check is only done if the chain 363 * Mark as 'IGNORE' each incorrect sector. This check is only done if the chain
364 * was being folded when NFTL was interrupted. 364 * was being folded when NFTL was interrupted.
365 * 365 *
366 * The check_free_sectors in this function is neceressary. There is a possible 366 * The check_free_sectors in this function is necessary. There is a possible
367 * situation that after writing the Data area, the Block Control Information is 367 * situation that after writing the Data area, the Block Control Information is
368 * not updated according (due to power failure or something) which leaves the block 368 * not updated according (due to power failure or something) which leaves the block
369 * in an umconsistent state. So we have to check if a block is really FREE in this 369 * in an inconsistent state. So we have to check if a block is really FREE in this
370 * case. */ 370 * case. */
371static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block) 371static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block)
372{ 372{
@@ -428,7 +428,7 @@ static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block)
428 428
429 for (;;) { 429 for (;;) {
430 length++; 430 length++;
431 /* avoid infinite loops, although this is guaranted not to 431 /* avoid infinite loops, although this is guaranteed not to
432 happen because of the previous checks */ 432 happen because of the previous checks */
433 if (length >= nftl->nb_blocks) { 433 if (length >= nftl->nb_blocks) {
434 printk("nftl: length too long %d !\n", length); 434 printk("nftl: length too long %d !\n", length);
@@ -447,11 +447,11 @@ static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block)
447/* format_chain: Format an invalid Virtual Unit chain. It frees all the Erase Units in a 447/* format_chain: Format an invalid Virtual Unit chain. It frees all the Erase Units in a
448 * Virtual Unit Chain, i.e. all the units are disconnected. 448 * Virtual Unit Chain, i.e. all the units are disconnected.
449 * 449 *
450 * It is not stricly correct to begin from the first block of the chain because 450 * It is not strictly correct to begin from the first block of the chain because
451 * if we stop the code, we may see again a valid chain if there was a first_block 451 * if we stop the code, we may see again a valid chain if there was a first_block
452 * flag in a block inside it. But is it really a problem ? 452 * flag in a block inside it. But is it really a problem ?
453 * 453 *
454 * FixMe: Figure out what the last statesment means. What if power failure when we are 454 * FixMe: Figure out what the last statement means. What if power failure when we are
455 * in the for (;;) loop formatting blocks ?? 455 * in the for (;;) loop formatting blocks ??
456 */ 456 */
457static void format_chain(struct NFTLrecord *nftl, unsigned int first_block) 457static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
@@ -485,7 +485,7 @@ static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
485 * totally free (only 0xff). 485 * totally free (only 0xff).
486 * 486 *
487 * Definition: Free Erase Unit -- A properly erased/formatted Free Erase Unit should have meet the 487 * Definition: Free Erase Unit -- A properly erased/formatted Free Erase Unit should have meet the
488 * following critia: 488 * following criteria:
489 * 1. */ 489 * 1. */
490static int check_and_mark_free_block(struct NFTLrecord *nftl, int block) 490static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
491{ 491{
@@ -502,7 +502,7 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
502 erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1)); 502 erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
503 if (erase_mark != ERASE_MARK) { 503 if (erase_mark != ERASE_MARK) {
504 /* if no erase mark, the block must be totally free. This is 504 /* if no erase mark, the block must be totally free. This is
505 possible in two cases : empty filsystem or interrupted erase (very unlikely) */ 505 possible in two cases : empty filesystem or interrupted erase (very unlikely) */
506 if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0) 506 if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0)
507 return -1; 507 return -1;
508 508
@@ -544,7 +544,7 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
544/* get_fold_mark: Read fold mark from Unit Control Information #2, we use FOLD_MARK_IN_PROGRESS 544/* get_fold_mark: Read fold mark from Unit Control Information #2, we use FOLD_MARK_IN_PROGRESS
545 * to indicate that we are in the progression of a Virtual Unit Chain folding. If the UCI #2 545 * to indicate that we are in the progression of a Virtual Unit Chain folding. If the UCI #2
546 * is FOLD_MARK_IN_PROGRESS when mounting the NFTL, the (previous) folding process is interrupted 546 * is FOLD_MARK_IN_PROGRESS when mounting the NFTL, the (previous) folding process is interrupted
547 * for some reason. A clean up/check of the VUC is neceressary in this case. 547 * for some reason. A clean up/check of the VUC is necessary in this case.
548 * 548 *
549 * WARNING: return 0 if read error 549 * WARNING: return 0 if read error
550 */ 550 */
@@ -657,7 +657,7 @@ int NFTL_mount(struct NFTLrecord *s)
657 printk("Block %d: incorrect logical block: %d expected: %d\n", 657 printk("Block %d: incorrect logical block: %d expected: %d\n",
658 block, logical_block, first_logical_block); 658 block, logical_block, first_logical_block);
659 /* the chain is incorrect : we must format it, 659 /* the chain is incorrect : we must format it,
660 but we need to read it completly */ 660 but we need to read it completely */
661 do_format_chain = 1; 661 do_format_chain = 1;
662 } 662 }
663 if (is_first_block) { 663 if (is_first_block) {
@@ -669,7 +669,7 @@ int NFTL_mount(struct NFTLrecord *s)
669 printk("Block %d: incorrectly marked as first block in chain\n", 669 printk("Block %d: incorrectly marked as first block in chain\n",
670 block); 670 block);
671 /* the chain is incorrect : we must format it, 671 /* the chain is incorrect : we must format it,
672 but we need to read it completly */ 672 but we need to read it completely */
673 do_format_chain = 1; 673 do_format_chain = 1;
674 } else { 674 } else {
675 printk("Block %d: folding in progress - ignoring first block flag\n", 675 printk("Block %d: folding in progress - ignoring first block flag\n",
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index a996718fa6b0..64be8f0848b0 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -20,14 +20,23 @@
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/mtd/partitions.h> 21#include <linux/mtd/partitions.h>
22 22
23int __devinit of_mtd_parse_partitions(struct device *dev, 23static int parse_ofpart_partitions(struct mtd_info *master,
24 struct device_node *node, 24 struct mtd_partition **pparts,
25 struct mtd_partition **pparts) 25 struct mtd_part_parser_data *data)
26{ 26{
27 struct device_node *node;
27 const char *partname; 28 const char *partname;
28 struct device_node *pp; 29 struct device_node *pp;
29 int nr_parts, i; 30 int nr_parts, i;
30 31
32
33 if (!data)
34 return 0;
35
36 node = data->of_node;
37 if (!node)
38 return 0;
39
31 /* First count the subnodes */ 40 /* First count the subnodes */
32 pp = NULL; 41 pp = NULL;
33 nr_parts = 0; 42 nr_parts = 0;
@@ -69,7 +78,7 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
69 78
70 if (!i) { 79 if (!i) {
71 of_node_put(pp); 80 of_node_put(pp);
72 dev_err(dev, "No valid partition found on %s\n", node->full_name); 81 pr_err("No valid partition found on %s\n", node->full_name);
73 kfree(*pparts); 82 kfree(*pparts);
74 *pparts = NULL; 83 *pparts = NULL;
75 return -EINVAL; 84 return -EINVAL;
@@ -77,6 +86,99 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
77 86
78 return nr_parts; 87 return nr_parts;
79} 88}
80EXPORT_SYMBOL(of_mtd_parse_partitions); 89
90static struct mtd_part_parser ofpart_parser = {
91 .owner = THIS_MODULE,
92 .parse_fn = parse_ofpart_partitions,
93 .name = "ofpart",
94};
95
96static int parse_ofoldpart_partitions(struct mtd_info *master,
97 struct mtd_partition **pparts,
98 struct mtd_part_parser_data *data)
99{
100 struct device_node *dp;
101 int i, plen, nr_parts;
102 const struct {
103 __be32 offset, len;
104 } *part;
105 const char *names;
106
107 if (!data)
108 return 0;
109
110 dp = data->of_node;
111 if (!dp)
112 return 0;
113
114 part = of_get_property(dp, "partitions", &plen);
115 if (!part)
116 return 0; /* No partitions found */
117
118 pr_warning("Device tree uses obsolete partition map binding: %s\n",
119 dp->full_name);
120
121 nr_parts = plen / sizeof(part[0]);
122
123 *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
124 if (!pparts)
125 return -ENOMEM;
126
127 names = of_get_property(dp, "partition-names", &plen);
128
129 for (i = 0; i < nr_parts; i++) {
130 (*pparts)[i].offset = be32_to_cpu(part->offset);
131 (*pparts)[i].size = be32_to_cpu(part->len) & ~1;
132 /* bit 0 set signifies read only partition */
133 if (be32_to_cpu(part->len) & 1)
134 (*pparts)[i].mask_flags = MTD_WRITEABLE;
135
136 if (names && (plen > 0)) {
137 int len = strlen(names) + 1;
138
139 (*pparts)[i].name = (char *)names;
140 plen -= len;
141 names += len;
142 } else {
143 (*pparts)[i].name = "unnamed";
144 }
145
146 part++;
147 }
148
149 return nr_parts;
150}
151
152static struct mtd_part_parser ofoldpart_parser = {
153 .owner = THIS_MODULE,
154 .parse_fn = parse_ofoldpart_partitions,
155 .name = "ofoldpart",
156};
157
158static int __init ofpart_parser_init(void)
159{
160 int rc;
161 rc = register_mtd_parser(&ofpart_parser);
162 if (rc)
163 goto out;
164
165 rc = register_mtd_parser(&ofoldpart_parser);
166 if (!rc)
167 return 0;
168
169 deregister_mtd_parser(&ofoldpart_parser);
170out:
171 return rc;
172}
173
174module_init(ofpart_parser_init);
81 175
82MODULE_LICENSE("GPL"); 176MODULE_LICENSE("GPL");
177MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
178MODULE_AUTHOR("Vitaly Wool, David Gibson");
179/*
180 * When MTD core cannot find the requested parser, it tries to load the module
181 * with the same name. Since we provide the ofoldpart parser, we should have
182 * the corresponding alias.
183 */
184MODULE_ALIAS("ofoldpart");
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 2d70d354d846..7813095264a5 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -30,11 +30,8 @@
30 */ 30 */
31#define DRIVER_NAME "onenand-flash" 31#define DRIVER_NAME "onenand-flash"
32 32
33static const char *part_probes[] = { "cmdlinepart", NULL, };
34
35struct onenand_info { 33struct onenand_info {
36 struct mtd_info mtd; 34 struct mtd_info mtd;
37 struct mtd_partition *parts;
38 struct onenand_chip onenand; 35 struct onenand_chip onenand;
39}; 36};
40 37
@@ -73,13 +70,9 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev)
73 goto out_iounmap; 70 goto out_iounmap;
74 } 71 }
75 72
76 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); 73 err = mtd_device_parse_register(&info->mtd, NULL, 0,
77 if (err > 0) 74 pdata ? pdata->parts : NULL,
78 mtd_device_register(&info->mtd, info->parts, err); 75 pdata ? pdata->nr_parts : 0);
79 else if (err <= 0 && pdata && pdata->parts)
80 mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
81 else
82 err = mtd_device_register(&info->mtd, NULL, 0);
83 76
84 platform_set_drvdata(pdev, info); 77 platform_set_drvdata(pdev, info);
85 78
@@ -104,7 +97,6 @@ static int __devexit generic_onenand_remove(struct platform_device *pdev)
104 platform_set_drvdata(pdev, NULL); 97 platform_set_drvdata(pdev, NULL);
105 98
106 if (info) { 99 if (info) {
107 mtd_device_unregister(&info->mtd);
108 onenand_release(&info->mtd); 100 onenand_release(&info->mtd);
109 release_mem_region(res->start, size); 101 release_mem_region(res->start, size);
110 iounmap(info->onenand.base); 102 iounmap(info->onenand.base);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 6a1d6d9a2df9..7e9ea6852b67 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -57,7 +57,6 @@ struct omap2_onenand {
57 unsigned long phys_base; 57 unsigned long phys_base;
58 int gpio_irq; 58 int gpio_irq;
59 struct mtd_info mtd; 59 struct mtd_info mtd;
60 struct mtd_partition *parts;
61 struct onenand_chip onenand; 60 struct onenand_chip onenand;
62 struct completion irq_done; 61 struct completion irq_done;
63 struct completion dma_done; 62 struct completion dma_done;
@@ -67,8 +66,6 @@ struct omap2_onenand {
67 struct regulator *regulator; 66 struct regulator *regulator;
68}; 67};
69 68
70static const char *part_probes[] = { "cmdlinepart", NULL, };
71
72static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data) 69static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
73{ 70{
74 struct omap2_onenand *c = data; 71 struct omap2_onenand *c = data;
@@ -741,6 +738,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
741 c->regulator = regulator_get(&pdev->dev, "vonenand"); 738 c->regulator = regulator_get(&pdev->dev, "vonenand");
742 if (IS_ERR(c->regulator)) { 739 if (IS_ERR(c->regulator)) {
743 dev_err(&pdev->dev, "Failed to get regulator\n"); 740 dev_err(&pdev->dev, "Failed to get regulator\n");
741 r = PTR_ERR(c->regulator);
744 goto err_release_dma; 742 goto err_release_dma;
745 } 743 }
746 c->onenand.enable = omap2_onenand_enable; 744 c->onenand.enable = omap2_onenand_enable;
@@ -753,13 +751,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
753 if ((r = onenand_scan(&c->mtd, 1)) < 0) 751 if ((r = onenand_scan(&c->mtd, 1)) < 0)
754 goto err_release_regulator; 752 goto err_release_regulator;
755 753
756 r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0); 754 r = mtd_device_parse_register(&c->mtd, NULL, 0,
757 if (r > 0) 755 pdata ? pdata->parts : NULL,
758 r = mtd_device_register(&c->mtd, c->parts, r); 756 pdata ? pdata->nr_parts : 0);
759 else if (pdata->parts != NULL)
760 r = mtd_device_register(&c->mtd, pdata->parts, pdata->nr_parts);
761 else
762 r = mtd_device_register(&c->mtd, NULL, 0);
763 if (r) 757 if (r)
764 goto err_release_onenand; 758 goto err_release_onenand;
765 759
@@ -786,7 +780,6 @@ err_release_mem_region:
786err_free_cs: 780err_free_cs:
787 gpmc_cs_free(c->gpmc_cs); 781 gpmc_cs_free(c->gpmc_cs);
788err_kfree: 782err_kfree:
789 kfree(c->parts);
790 kfree(c); 783 kfree(c);
791 784
792 return r; 785 return r;
@@ -809,7 +802,6 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
809 iounmap(c->onenand.base); 802 iounmap(c->onenand.base);
810 release_mem_region(c->phys_base, ONENAND_IO_SIZE); 803 release_mem_region(c->phys_base, ONENAND_IO_SIZE);
811 gpmc_cs_free(c->gpmc_cs); 804 gpmc_cs_free(c->gpmc_cs);
812 kfree(c->parts);
813 kfree(c); 805 kfree(c);
814 806
815 return 0; 807 return 0;
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index ac9e959802a7..a8394730b4b6 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1015,7 +1015,7 @@ static void onenand_release_device(struct mtd_info *mtd)
1015} 1015}
1016 1016
1017/** 1017/**
1018 * onenand_transfer_auto_oob - [Internal] oob auto-placement transfer 1018 * onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer
1019 * @param mtd MTD device structure 1019 * @param mtd MTD device structure
1020 * @param buf destination address 1020 * @param buf destination address
1021 * @param column oob offset to read from 1021 * @param column oob offset to read from
@@ -1079,7 +1079,7 @@ static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
1079 return status; 1079 return status;
1080 1080
1081 /* check if we failed due to uncorrectable error */ 1081 /* check if we failed due to uncorrectable error */
1082 if (status != -EBADMSG && status != ONENAND_BBT_READ_ECC_ERROR) 1082 if (!mtd_is_eccerr(status) && status != ONENAND_BBT_READ_ECC_ERROR)
1083 return status; 1083 return status;
1084 1084
1085 /* check if address lies in MLC region */ 1085 /* check if address lies in MLC region */
@@ -1122,10 +1122,10 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1122 int ret = 0; 1122 int ret = 0;
1123 int writesize = this->writesize; 1123 int writesize = this->writesize;
1124 1124
1125 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n", 1125 pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
1126 __func__, (unsigned int) from, (int) len); 1126 (int)len);
1127 1127
1128 if (ops->mode == MTD_OOB_AUTO) 1128 if (ops->mode == MTD_OPS_AUTO_OOB)
1129 oobsize = this->ecclayout->oobavail; 1129 oobsize = this->ecclayout->oobavail;
1130 else 1130 else
1131 oobsize = mtd->oobsize; 1131 oobsize = mtd->oobsize;
@@ -1159,7 +1159,7 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1159 if (unlikely(ret)) 1159 if (unlikely(ret))
1160 ret = onenand_recover_lsb(mtd, from, ret); 1160 ret = onenand_recover_lsb(mtd, from, ret);
1161 onenand_update_bufferram(mtd, from, !ret); 1161 onenand_update_bufferram(mtd, from, !ret);
1162 if (ret == -EBADMSG) 1162 if (mtd_is_eccerr(ret))
1163 ret = 0; 1163 ret = 0;
1164 if (ret) 1164 if (ret)
1165 break; 1165 break;
@@ -1170,7 +1170,7 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1170 thisooblen = oobsize - oobcolumn; 1170 thisooblen = oobsize - oobcolumn;
1171 thisooblen = min_t(int, thisooblen, ooblen - oobread); 1171 thisooblen = min_t(int, thisooblen, ooblen - oobread);
1172 1172
1173 if (ops->mode == MTD_OOB_AUTO) 1173 if (ops->mode == MTD_OPS_AUTO_OOB)
1174 onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen); 1174 onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
1175 else 1175 else
1176 this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen); 1176 this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
@@ -1226,10 +1226,10 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1226 int ret = 0, boundary = 0; 1226 int ret = 0, boundary = 0;
1227 int writesize = this->writesize; 1227 int writesize = this->writesize;
1228 1228
1229 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n", 1229 pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
1230 __func__, (unsigned int) from, (int) len); 1230 (int)len);
1231 1231
1232 if (ops->mode == MTD_OOB_AUTO) 1232 if (ops->mode == MTD_OPS_AUTO_OOB)
1233 oobsize = this->ecclayout->oobavail; 1233 oobsize = this->ecclayout->oobavail;
1234 else 1234 else
1235 oobsize = mtd->oobsize; 1235 oobsize = mtd->oobsize;
@@ -1255,7 +1255,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1255 this->command(mtd, ONENAND_CMD_READ, from, writesize); 1255 this->command(mtd, ONENAND_CMD_READ, from, writesize);
1256 ret = this->wait(mtd, FL_READING); 1256 ret = this->wait(mtd, FL_READING);
1257 onenand_update_bufferram(mtd, from, !ret); 1257 onenand_update_bufferram(mtd, from, !ret);
1258 if (ret == -EBADMSG) 1258 if (mtd_is_eccerr(ret))
1259 ret = 0; 1259 ret = 0;
1260 } 1260 }
1261 } 1261 }
@@ -1291,7 +1291,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1291 thisooblen = oobsize - oobcolumn; 1291 thisooblen = oobsize - oobcolumn;
1292 thisooblen = min_t(int, thisooblen, ooblen - oobread); 1292 thisooblen = min_t(int, thisooblen, ooblen - oobread);
1293 1293
1294 if (ops->mode == MTD_OOB_AUTO) 1294 if (ops->mode == MTD_OPS_AUTO_OOB)
1295 onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen); 1295 onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
1296 else 1296 else
1297 this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen); 1297 this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
@@ -1315,7 +1315,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1315 /* Now wait for load */ 1315 /* Now wait for load */
1316 ret = this->wait(mtd, FL_READING); 1316 ret = this->wait(mtd, FL_READING);
1317 onenand_update_bufferram(mtd, from, !ret); 1317 onenand_update_bufferram(mtd, from, !ret);
1318 if (ret == -EBADMSG) 1318 if (mtd_is_eccerr(ret))
1319 ret = 0; 1319 ret = 0;
1320 } 1320 }
1321 1321
@@ -1351,19 +1351,19 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1351 struct mtd_ecc_stats stats; 1351 struct mtd_ecc_stats stats;
1352 int read = 0, thislen, column, oobsize; 1352 int read = 0, thislen, column, oobsize;
1353 size_t len = ops->ooblen; 1353 size_t len = ops->ooblen;
1354 mtd_oob_mode_t mode = ops->mode; 1354 unsigned int mode = ops->mode;
1355 u_char *buf = ops->oobbuf; 1355 u_char *buf = ops->oobbuf;
1356 int ret = 0, readcmd; 1356 int ret = 0, readcmd;
1357 1357
1358 from += ops->ooboffs; 1358 from += ops->ooboffs;
1359 1359
1360 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n", 1360 pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
1361 __func__, (unsigned int) from, (int) len); 1361 (int)len);
1362 1362
1363 /* Initialize return length value */ 1363 /* Initialize return length value */
1364 ops->oobretlen = 0; 1364 ops->oobretlen = 0;
1365 1365
1366 if (mode == MTD_OOB_AUTO) 1366 if (mode == MTD_OPS_AUTO_OOB)
1367 oobsize = this->ecclayout->oobavail; 1367 oobsize = this->ecclayout->oobavail;
1368 else 1368 else
1369 oobsize = mtd->oobsize; 1369 oobsize = mtd->oobsize;
@@ -1403,13 +1403,13 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1403 if (unlikely(ret)) 1403 if (unlikely(ret))
1404 ret = onenand_recover_lsb(mtd, from, ret); 1404 ret = onenand_recover_lsb(mtd, from, ret);
1405 1405
1406 if (ret && ret != -EBADMSG) { 1406 if (ret && !mtd_is_eccerr(ret)) {
1407 printk(KERN_ERR "%s: read failed = 0x%x\n", 1407 printk(KERN_ERR "%s: read failed = 0x%x\n",
1408 __func__, ret); 1408 __func__, ret);
1409 break; 1409 break;
1410 } 1410 }
1411 1411
1412 if (mode == MTD_OOB_AUTO) 1412 if (mode == MTD_OPS_AUTO_OOB)
1413 onenand_transfer_auto_oob(mtd, buf, column, thislen); 1413 onenand_transfer_auto_oob(mtd, buf, column, thislen);
1414 else 1414 else
1415 this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen); 1415 this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
@@ -1487,10 +1487,10 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
1487 int ret; 1487 int ret;
1488 1488
1489 switch (ops->mode) { 1489 switch (ops->mode) {
1490 case MTD_OOB_PLACE: 1490 case MTD_OPS_PLACE_OOB:
1491 case MTD_OOB_AUTO: 1491 case MTD_OPS_AUTO_OOB:
1492 break; 1492 break;
1493 case MTD_OOB_RAW: 1493 case MTD_OPS_RAW:
1494 /* Not implemented yet */ 1494 /* Not implemented yet */
1495 default: 1495 default:
1496 return -EINVAL; 1496 return -EINVAL;
@@ -1576,8 +1576,8 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
1576 size_t len = ops->ooblen; 1576 size_t len = ops->ooblen;
1577 u_char *buf = ops->oobbuf; 1577 u_char *buf = ops->oobbuf;
1578 1578
1579 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %zi\n", 1579 pr_debug("%s: from = 0x%08x, len = %zi\n", __func__, (unsigned int)from,
1580 __func__, (unsigned int) from, len); 1580 len);
1581 1581
1582 /* Initialize return value */ 1582 /* Initialize return value */
1583 ops->oobretlen = 0; 1583 ops->oobretlen = 0;
@@ -1750,8 +1750,8 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1750 /* Wait for any existing operation to clear */ 1750 /* Wait for any existing operation to clear */
1751 onenand_panic_wait(mtd); 1751 onenand_panic_wait(mtd);
1752 1752
1753 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", 1753 pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
1754 __func__, (unsigned int) to, (int) len); 1754 (int)len);
1755 1755
1756 /* Initialize retlen, in case of early exit */ 1756 /* Initialize retlen, in case of early exit */
1757 *retlen = 0; 1757 *retlen = 0;
@@ -1821,7 +1821,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1821} 1821}
1822 1822
1823/** 1823/**
1824 * onenand_fill_auto_oob - [Internal] oob auto-placement transfer 1824 * onenand_fill_auto_oob - [INTERN] oob auto-placement transfer
1825 * @param mtd MTD device structure 1825 * @param mtd MTD device structure
1826 * @param oob_buf oob buffer 1826 * @param oob_buf oob buffer
1827 * @param buf source address 1827 * @param buf source address
@@ -1883,8 +1883,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1883 u_char *oobbuf; 1883 u_char *oobbuf;
1884 int ret = 0, cmd; 1884 int ret = 0, cmd;
1885 1885
1886 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", 1886 pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
1887 __func__, (unsigned int) to, (int) len); 1887 (int)len);
1888 1888
1889 /* Initialize retlen, in case of early exit */ 1889 /* Initialize retlen, in case of early exit */
1890 ops->retlen = 0; 1890 ops->retlen = 0;
@@ -1908,7 +1908,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1908 if (!len) 1908 if (!len)
1909 return 0; 1909 return 0;
1910 1910
1911 if (ops->mode == MTD_OOB_AUTO) 1911 if (ops->mode == MTD_OPS_AUTO_OOB)
1912 oobsize = this->ecclayout->oobavail; 1912 oobsize = this->ecclayout->oobavail;
1913 else 1913 else
1914 oobsize = mtd->oobsize; 1914 oobsize = mtd->oobsize;
@@ -1945,7 +1945,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1945 /* We send data to spare ram with oobsize 1945 /* We send data to spare ram with oobsize
1946 * to prevent byte access */ 1946 * to prevent byte access */
1947 memset(oobbuf, 0xff, mtd->oobsize); 1947 memset(oobbuf, 0xff, mtd->oobsize);
1948 if (ops->mode == MTD_OOB_AUTO) 1948 if (ops->mode == MTD_OPS_AUTO_OOB)
1949 onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen); 1949 onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen);
1950 else 1950 else
1951 memcpy(oobbuf + oobcolumn, oob, thisooblen); 1951 memcpy(oobbuf + oobcolumn, oob, thisooblen);
@@ -2055,7 +2055,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
2055 2055
2056 2056
2057/** 2057/**
2058 * onenand_write_oob_nolock - [Internal] OneNAND write out-of-band 2058 * onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band
2059 * @param mtd MTD device structure 2059 * @param mtd MTD device structure
2060 * @param to offset to write to 2060 * @param to offset to write to
2061 * @param len number of bytes to write 2061 * @param len number of bytes to write
@@ -2074,17 +2074,17 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
2074 u_char *oobbuf; 2074 u_char *oobbuf;
2075 size_t len = ops->ooblen; 2075 size_t len = ops->ooblen;
2076 const u_char *buf = ops->oobbuf; 2076 const u_char *buf = ops->oobbuf;
2077 mtd_oob_mode_t mode = ops->mode; 2077 unsigned int mode = ops->mode;
2078 2078
2079 to += ops->ooboffs; 2079 to += ops->ooboffs;
2080 2080
2081 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", 2081 pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
2082 __func__, (unsigned int) to, (int) len); 2082 (int)len);
2083 2083
2084 /* Initialize retlen, in case of early exit */ 2084 /* Initialize retlen, in case of early exit */
2085 ops->oobretlen = 0; 2085 ops->oobretlen = 0;
2086 2086
2087 if (mode == MTD_OOB_AUTO) 2087 if (mode == MTD_OPS_AUTO_OOB)
2088 oobsize = this->ecclayout->oobavail; 2088 oobsize = this->ecclayout->oobavail;
2089 else 2089 else
2090 oobsize = mtd->oobsize; 2090 oobsize = mtd->oobsize;
@@ -2128,7 +2128,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
2128 /* We send data to spare ram with oobsize 2128 /* We send data to spare ram with oobsize
2129 * to prevent byte access */ 2129 * to prevent byte access */
2130 memset(oobbuf, 0xff, mtd->oobsize); 2130 memset(oobbuf, 0xff, mtd->oobsize);
2131 if (mode == MTD_OOB_AUTO) 2131 if (mode == MTD_OPS_AUTO_OOB)
2132 onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen); 2132 onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen);
2133 else 2133 else
2134 memcpy(oobbuf + column, buf, thislen); 2134 memcpy(oobbuf + column, buf, thislen);
@@ -2217,10 +2217,10 @@ static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
2217 int ret; 2217 int ret;
2218 2218
2219 switch (ops->mode) { 2219 switch (ops->mode) {
2220 case MTD_OOB_PLACE: 2220 case MTD_OPS_PLACE_OOB:
2221 case MTD_OOB_AUTO: 2221 case MTD_OPS_AUTO_OOB:
2222 break; 2222 break;
2223 case MTD_OOB_RAW: 2223 case MTD_OPS_RAW:
2224 /* Not implemented yet */ 2224 /* Not implemented yet */
2225 default: 2225 default:
2226 return -EINVAL; 2226 return -EINVAL;
@@ -2281,7 +2281,7 @@ static int onenand_multiblock_erase_verify(struct mtd_info *mtd,
2281} 2281}
2282 2282
2283/** 2283/**
2284 * onenand_multiblock_erase - [Internal] erase block(s) using multiblock erase 2284 * onenand_multiblock_erase - [INTERN] erase block(s) using multiblock erase
2285 * @param mtd MTD device structure 2285 * @param mtd MTD device structure
2286 * @param instr erase instruction 2286 * @param instr erase instruction
2287 * @param region erase region 2287 * @param region erase region
@@ -2397,7 +2397,7 @@ static int onenand_multiblock_erase(struct mtd_info *mtd,
2397 2397
2398 2398
2399/** 2399/**
2400 * onenand_block_by_block_erase - [Internal] erase block(s) using regular erase 2400 * onenand_block_by_block_erase - [INTERN] erase block(s) using regular erase
2401 * @param mtd MTD device structure 2401 * @param mtd MTD device structure
2402 * @param instr erase instruction 2402 * @param instr erase instruction
2403 * @param region erase region 2403 * @param region erase region
@@ -2489,8 +2489,9 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2489 struct mtd_erase_region_info *region = NULL; 2489 struct mtd_erase_region_info *region = NULL;
2490 loff_t region_offset = 0; 2490 loff_t region_offset = 0;
2491 2491
2492 DEBUG(MTD_DEBUG_LEVEL3, "%s: start=0x%012llx, len=%llu\n", __func__, 2492 pr_debug("%s: start=0x%012llx, len=%llu\n", __func__,
2493 (unsigned long long) instr->addr, (unsigned long long) instr->len); 2493 (unsigned long long)instr->addr,
2494 (unsigned long long)instr->len);
2494 2495
2495 /* Do not allow erase past end of device */ 2496 /* Do not allow erase past end of device */
2496 if (unlikely((len + addr) > mtd->size)) { 2497 if (unlikely((len + addr) > mtd->size)) {
@@ -2558,7 +2559,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2558 */ 2559 */
2559static void onenand_sync(struct mtd_info *mtd) 2560static void onenand_sync(struct mtd_info *mtd)
2560{ 2561{
2561 DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__); 2562 pr_debug("%s: called\n", __func__);
2562 2563
2563 /* Grab the lock and see if the device is available */ 2564 /* Grab the lock and see if the device is available */
2564 onenand_get_device(mtd, FL_SYNCING); 2565 onenand_get_device(mtd, FL_SYNCING);
@@ -2602,7 +2603,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
2602 struct bbm_info *bbm = this->bbm; 2603 struct bbm_info *bbm = this->bbm;
2603 u_char buf[2] = {0, 0}; 2604 u_char buf[2] = {0, 0};
2604 struct mtd_oob_ops ops = { 2605 struct mtd_oob_ops ops = {
2605 .mode = MTD_OOB_PLACE, 2606 .mode = MTD_OPS_PLACE_OOB,
2606 .ooblen = 2, 2607 .ooblen = 2,
2607 .oobbuf = buf, 2608 .oobbuf = buf,
2608 .ooboffs = 0, 2609 .ooboffs = 0,
@@ -2922,7 +2923,7 @@ static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr,
2922} 2923}
2923 2924
2924/** 2925/**
2925 * onenand_otp_write_oob_nolock - [Internal] OneNAND write out-of-band, specific to OTP 2926 * onenand_otp_write_oob_nolock - [INTERN] OneNAND write out-of-band, specific to OTP
2926 * @param mtd MTD device structure 2927 * @param mtd MTD device structure
2927 * @param to offset to write to 2928 * @param to offset to write to
2928 * @param len number of bytes to write 2929 * @param len number of bytes to write
@@ -3170,7 +3171,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
3170 this->command(mtd, ONENAND_CMD_RESET, 0, 0); 3171 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
3171 this->wait(mtd, FL_RESETING); 3172 this->wait(mtd, FL_RESETING);
3172 } else { 3173 } else {
3173 ops.mode = MTD_OOB_PLACE; 3174 ops.mode = MTD_OPS_PLACE_OOB;
3174 ops.ooblen = len; 3175 ops.ooblen = len;
3175 ops.oobbuf = buf; 3176 ops.oobbuf = buf;
3176 ops.ooboffs = 0; 3177 ops.ooboffs = 0;
@@ -3429,6 +3430,19 @@ static void onenand_check_features(struct mtd_info *mtd)
3429 else if (numbufs == 1) { 3430 else if (numbufs == 1) {
3430 this->options |= ONENAND_HAS_4KB_PAGE; 3431 this->options |= ONENAND_HAS_4KB_PAGE;
3431 this->options |= ONENAND_HAS_CACHE_PROGRAM; 3432 this->options |= ONENAND_HAS_CACHE_PROGRAM;
3433 /*
3434 * There are two different 4KiB pagesize chips
3435 * and no way to detect it by H/W config values.
3436 *
3437 * To detect the correct NOP for each chips,
3438 * It should check the version ID as workaround.
3439 *
3440 * Now it has as following
3441 * KFM4G16Q4M has NOP 4 with version ID 0x0131
3442 * KFM4G16Q5M has NOP 1 with versoin ID 0x013e
3443 */
3444 if ((this->version_id & 0xf) == 0xe)
3445 this->options |= ONENAND_HAS_NOP_1;
3432 } 3446 }
3433 3447
3434 case ONENAND_DEVICE_DENSITY_2Gb: 3448 case ONENAND_DEVICE_DENSITY_2Gb:
@@ -3663,7 +3677,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
3663 int i, ret; 3677 int i, ret;
3664 int block; 3678 int block;
3665 struct mtd_oob_ops ops = { 3679 struct mtd_oob_ops ops = {
3666 .mode = MTD_OOB_PLACE, 3680 .mode = MTD_OPS_PLACE_OOB,
3667 .ooboffs = 0, 3681 .ooboffs = 0,
3668 .ooblen = mtd->oobsize, 3682 .ooblen = mtd->oobsize,
3669 .datbuf = NULL, 3683 .datbuf = NULL,
@@ -4054,6 +4068,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
4054 this->ecclayout = &onenand_oob_128; 4068 this->ecclayout = &onenand_oob_128;
4055 mtd->subpage_sft = 2; 4069 mtd->subpage_sft = 2;
4056 } 4070 }
4071 if (ONENAND_IS_NOP_1(this))
4072 mtd->subpage_sft = 0;
4057 break; 4073 break;
4058 case 64: 4074 case 64:
4059 this->ecclayout = &onenand_oob_64; 4075 this->ecclayout = &onenand_oob_64;
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index b2d7fc5ea25d..66fe3b7e7851 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -81,7 +81,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
81 startblock = 0; 81 startblock = 0;
82 from = 0; 82 from = 0;
83 83
84 ops.mode = MTD_OOB_PLACE; 84 ops.mode = MTD_OPS_PLACE_OOB;
85 ops.ooblen = readlen; 85 ops.ooblen = readlen;
86 ops.oobbuf = buf; 86 ops.oobbuf = buf;
87 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; 87 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
@@ -154,7 +154,7 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
154 block = (int) (onenand_block(this, offs) << 1); 154 block = (int) (onenand_block(this, offs) << 1);
155 res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03; 155 res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
156 156
157 DEBUG(MTD_DEBUG_LEVEL2, "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n", 157 pr_debug("onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
158 (unsigned int) offs, block >> 1, res); 158 (unsigned int) offs, block >> 1, res);
159 159
160 switch ((int) res) { 160 switch ((int) res) {
@@ -189,10 +189,8 @@ int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
189 len = this->chipsize >> (this->erase_shift + 2); 189 len = this->chipsize >> (this->erase_shift + 2);
190 /* Allocate memory (2bit per block) and clear the memory bad block table */ 190 /* Allocate memory (2bit per block) and clear the memory bad block table */
191 bbm->bbt = kzalloc(len, GFP_KERNEL); 191 bbm->bbt = kzalloc(len, GFP_KERNEL);
192 if (!bbm->bbt) { 192 if (!bbm->bbt)
193 printk(KERN_ERR "onenand_scan_bbt: Out of memory\n");
194 return -ENOMEM; 193 return -ENOMEM;
195 }
196 194
197 /* Set the bad block position */ 195 /* Set the bad block position */
198 bbm->badblockpos = ONENAND_BADBLOCK_POS; 196 bbm->badblockpos = ONENAND_BADBLOCK_POS;
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 3306b5b3c736..5474547eafc2 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -147,7 +147,6 @@ struct s3c_onenand {
147 struct resource *dma_res; 147 struct resource *dma_res;
148 unsigned long phys_base; 148 unsigned long phys_base;
149 struct completion complete; 149 struct completion complete;
150 struct mtd_partition *parts;
151}; 150};
152 151
153#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1))) 152#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
@@ -157,8 +156,6 @@ struct s3c_onenand {
157 156
158static struct s3c_onenand *onenand; 157static struct s3c_onenand *onenand;
159 158
160static const char *part_probes[] = { "cmdlinepart", NULL, };
161
162static inline int s3c_read_reg(int offset) 159static inline int s3c_read_reg(int offset)
163{ 160{
164 return readl(onenand->base + offset); 161 return readl(onenand->base + offset);
@@ -1017,13 +1014,9 @@ static int s3c_onenand_probe(struct platform_device *pdev)
1017 if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ) 1014 if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
1018 dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n"); 1015 dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
1019 1016
1020 err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0); 1017 err = mtd_device_parse_register(mtd, NULL, 0,
1021 if (err > 0) 1018 pdata ? pdata->parts : NULL,
1022 mtd_device_register(mtd, onenand->parts, err); 1019 pdata ? pdata->nr_parts : 0);
1023 else if (err <= 0 && pdata && pdata->parts)
1024 mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
1025 else
1026 err = mtd_device_register(mtd, NULL, 0);
1027 1020
1028 platform_set_drvdata(pdev, mtd); 1021 platform_set_drvdata(pdev, mtd);
1029 1022
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index 84b4dda023f4..e366b1d84ead 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -57,8 +57,8 @@ static inline int redboot_checksum(struct fis_image_desc *img)
57} 57}
58 58
59static int parse_redboot_partitions(struct mtd_info *master, 59static int parse_redboot_partitions(struct mtd_info *master,
60 struct mtd_partition **pparts, 60 struct mtd_partition **pparts,
61 unsigned long fis_origin) 61 struct mtd_part_parser_data *data)
62{ 62{
63 int nrparts = 0; 63 int nrparts = 0;
64 struct fis_image_desc *buf; 64 struct fis_image_desc *buf;
@@ -198,11 +198,10 @@ static int parse_redboot_partitions(struct mtd_info *master,
198 goto out; 198 goto out;
199 } 199 }
200 new_fl->img = &buf[i]; 200 new_fl->img = &buf[i];
201 if (fis_origin) { 201 if (data && data->origin)
202 buf[i].flash_base -= fis_origin; 202 buf[i].flash_base -= data->origin;
203 } else { 203 else
204 buf[i].flash_base &= master->size-1; 204 buf[i].flash_base &= master->size-1;
205 }
206 205
207 /* I'm sure the JFFS2 code has done me permanent damage. 206 /* I'm sure the JFFS2 code has done me permanent damage.
208 * I now think the following is _normal_ 207 * I now think the following is _normal_
@@ -298,6 +297,9 @@ static struct mtd_part_parser redboot_parser = {
298 .name = "RedBoot", 297 .name = "RedBoot",
299}; 298};
300 299
300/* mtd parsers will request the module by parser name */
301MODULE_ALIAS("RedBoot");
302
301static int __init redboot_parser_init(void) 303static int __init redboot_parser_init(void)
302{ 304{
303 return register_mtd_parser(&redboot_parser); 305 return register_mtd_parser(&redboot_parser);
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index ed3d6cd2c6dc..fddb714e323c 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -34,7 +34,7 @@ module_param(debug, int, S_IRUGO | S_IWUSR);
34MODULE_PARM_DESC(debug, "Debug level (0-2)"); 34MODULE_PARM_DESC(debug, "Debug level (0-2)");
35 35
36 36
37/* ------------------- sysfs attributtes ---------------------------------- */ 37/* ------------------- sysfs attributes ---------------------------------- */
38struct sm_sysfs_attribute { 38struct sm_sysfs_attribute {
39 struct device_attribute dev_attr; 39 struct device_attribute dev_attr;
40 char *data; 40 char *data;
@@ -138,7 +138,7 @@ static int sm_get_lba(uint8_t *lba)
138 if ((lba[0] & 0xF8) != 0x10) 138 if ((lba[0] & 0xF8) != 0x10)
139 return -2; 139 return -2;
140 140
141 /* check parity - endianess doesn't matter */ 141 /* check parity - endianness doesn't matter */
142 if (hweight16(*(uint16_t *)lba) & 1) 142 if (hweight16(*(uint16_t *)lba) & 1)
143 return -2; 143 return -2;
144 144
@@ -147,7 +147,7 @@ static int sm_get_lba(uint8_t *lba)
147 147
148 148
149/* 149/*
150 * Read LBA asscociated with block 150 * Read LBA associated with block
151 * returns -1, if block is erased 151 * returns -1, if block is erased
152 * returns -2 if error happens 152 * returns -2 if error happens
153 */ 153 */
@@ -252,11 +252,11 @@ static int sm_read_sector(struct sm_ftl *ftl,
252 return 0; 252 return 0;
253 } 253 }
254 254
255 /* User might not need the oob, but we do for data vertification */ 255 /* User might not need the oob, but we do for data verification */
256 if (!oob) 256 if (!oob)
257 oob = &tmp_oob; 257 oob = &tmp_oob;
258 258
259 ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE; 259 ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
260 ops.ooboffs = 0; 260 ops.ooboffs = 0;
261 ops.ooblen = SM_OOB_SIZE; 261 ops.ooblen = SM_OOB_SIZE;
262 ops.oobbuf = (void *)oob; 262 ops.oobbuf = (void *)oob;
@@ -276,12 +276,12 @@ again:
276 return ret; 276 return ret;
277 } 277 }
278 278
279 /* Unfortunelly, oob read will _always_ succeed, 279 /* Unfortunately, oob read will _always_ succeed,
280 despite card removal..... */ 280 despite card removal..... */
281 ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); 281 ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
282 282
283 /* Test for unknown errors */ 283 /* Test for unknown errors */
284 if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) { 284 if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
285 dbg("read of block %d at zone %d, failed due to error (%d)", 285 dbg("read of block %d at zone %d, failed due to error (%d)",
286 block, zone, ret); 286 block, zone, ret);
287 goto again; 287 goto again;
@@ -306,7 +306,7 @@ again:
306 } 306 }
307 307
308 /* Test ECC*/ 308 /* Test ECC*/
309 if (ret == -EBADMSG || 309 if (mtd_is_eccerr(ret) ||
310 (ftl->smallpagenand && sm_correct_sector(buffer, oob))) { 310 (ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
311 311
312 dbg("read of block %d at zone %d, failed due to ECC error", 312 dbg("read of block %d at zone %d, failed due to ECC error",
@@ -336,7 +336,7 @@ static int sm_write_sector(struct sm_ftl *ftl,
336 if (ftl->unstable) 336 if (ftl->unstable)
337 return -EIO; 337 return -EIO;
338 338
339 ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE; 339 ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
340 ops.len = SM_SECTOR_SIZE; 340 ops.len = SM_SECTOR_SIZE;
341 ops.datbuf = buffer; 341 ops.datbuf = buffer;
342 ops.ooboffs = 0; 342 ops.ooboffs = 0;
@@ -447,14 +447,14 @@ static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
447 447
448 /* We aren't checking the return value, because we don't care */ 448 /* We aren't checking the return value, because we don't care */
449 /* This also fails on fake xD cards, but I guess these won't expose 449 /* This also fails on fake xD cards, but I guess these won't expose
450 any bad blocks till fail completly */ 450 any bad blocks till fail completely */
451 for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE) 451 for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
452 sm_write_sector(ftl, zone, block, boffset, NULL, &oob); 452 sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
453} 453}
454 454
455/* 455/*
456 * Erase a block within a zone 456 * Erase a block within a zone
457 * If erase succedes, it updates free block fifo, otherwise marks block as bad 457 * If erase succeeds, it updates free block fifo, otherwise marks block as bad
458 */ 458 */
459static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block, 459static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
460 int put_free) 460 int put_free)
@@ -510,7 +510,7 @@ static void sm_erase_callback(struct erase_info *self)
510 complete(&ftl->erase_completion); 510 complete(&ftl->erase_completion);
511} 511}
512 512
513/* Throughtly test that block is valid. */ 513/* Thoroughly test that block is valid. */
514static int sm_check_block(struct sm_ftl *ftl, int zone, int block) 514static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
515{ 515{
516 int boffset; 516 int boffset;
@@ -526,7 +526,7 @@ static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
526 for (boffset = 0; boffset < ftl->block_size; 526 for (boffset = 0; boffset < ftl->block_size;
527 boffset += SM_SECTOR_SIZE) { 527 boffset += SM_SECTOR_SIZE) {
528 528
529 /* This shoudn't happen anyway */ 529 /* This shouldn't happen anyway */
530 if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob)) 530 if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
531 return -2; 531 return -2;
532 532
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 5cd189793332..976e3d28b962 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -135,8 +135,7 @@ static int get_valid_cis_sector(struct mtd_info *mtd)
135 /* Found */ 135 /* Found */
136 cis_sector = (int)(offset >> SECTOR_SHIFT); 136 cis_sector = (int)(offset >> SECTOR_SHIFT);
137 } else { 137 } else {
138 DEBUG(MTD_DEBUG_LEVEL1, 138 pr_debug("SSFDC_RO: CIS/IDI sector not found"
139 "SSFDC_RO: CIS/IDI sector not found"
140 " on %s (mtd%d)\n", mtd->name, 139 " on %s (mtd%d)\n", mtd->name,
141 mtd->index); 140 mtd->index);
142 } 141 }
@@ -170,7 +169,7 @@ static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
170 struct mtd_oob_ops ops; 169 struct mtd_oob_ops ops;
171 int ret; 170 int ret;
172 171
173 ops.mode = MTD_OOB_RAW; 172 ops.mode = MTD_OPS_RAW;
174 ops.ooboffs = 0; 173 ops.ooboffs = 0;
175 ops.ooblen = OOB_SIZE; 174 ops.ooblen = OOB_SIZE;
176 ops.oobbuf = buf; 175 ops.oobbuf = buf;
@@ -221,8 +220,7 @@ static int get_logical_address(uint8_t *oob_buf)
221 block_address >>= 1; 220 block_address >>= 1;
222 221
223 if (get_parity(block_address, 10) != parity) { 222 if (get_parity(block_address, 10) != parity) {
224 DEBUG(MTD_DEBUG_LEVEL0, 223 pr_debug("SSFDC_RO: logical address field%d"
225 "SSFDC_RO: logical address field%d"
226 "parity error(0x%04X)\n", j+1, 224 "parity error(0x%04X)\n", j+1,
227 block_address); 225 block_address);
228 } else { 226 } else {
@@ -235,7 +233,7 @@ static int get_logical_address(uint8_t *oob_buf)
235 if (!ok) 233 if (!ok)
236 block_address = -2; 234 block_address = -2;
237 235
238 DEBUG(MTD_DEBUG_LEVEL3, "SSFDC_RO: get_logical_address() %d\n", 236 pr_debug("SSFDC_RO: get_logical_address() %d\n",
239 block_address); 237 block_address);
240 238
241 return block_address; 239 return block_address;
@@ -249,7 +247,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
249 int ret, block_address, phys_block; 247 int ret, block_address, phys_block;
250 struct mtd_info *mtd = ssfdc->mbd.mtd; 248 struct mtd_info *mtd = ssfdc->mbd.mtd;
251 249
252 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: build_block_map() nblks=%d (%luK)\n", 250 pr_debug("SSFDC_RO: build_block_map() nblks=%d (%luK)\n",
253 ssfdc->map_len, 251 ssfdc->map_len,
254 (unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024); 252 (unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024);
255 253
@@ -262,8 +260,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
262 260
263 ret = read_raw_oob(mtd, offset, oob_buf); 261 ret = read_raw_oob(mtd, offset, oob_buf);
264 if (ret < 0) { 262 if (ret < 0) {
265 DEBUG(MTD_DEBUG_LEVEL0, 263 pr_debug("SSFDC_RO: mtd read_oob() failed at %lu\n",
266 "SSFDC_RO: mtd read_oob() failed at %lu\n",
267 offset); 264 offset);
268 return -1; 265 return -1;
269 } 266 }
@@ -279,8 +276,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
279 ssfdc->logic_block_map[block_address] = 276 ssfdc->logic_block_map[block_address] =
280 (unsigned short)phys_block; 277 (unsigned short)phys_block;
281 278
282 DEBUG(MTD_DEBUG_LEVEL2, 279 pr_debug("SSFDC_RO: build_block_map() phys_block=%d,"
283 "SSFDC_RO: build_block_map() phys_block=%d,"
284 "logic_block_addr=%d, zone=%d\n", 280 "logic_block_addr=%d, zone=%d\n",
285 phys_block, block_address, zone_index); 281 phys_block, block_address, zone_index);
286 } 282 }
@@ -304,11 +300,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
304 return; 300 return;
305 301
306 ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL); 302 ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL);
307 if (!ssfdc) { 303 if (!ssfdc)
308 printk(KERN_WARNING
309 "SSFDC_RO: out of memory for data structures\n");
310 return; 304 return;
311 }
312 305
313 ssfdc->mbd.mtd = mtd; 306 ssfdc->mbd.mtd = mtd;
314 ssfdc->mbd.devnum = -1; 307 ssfdc->mbd.devnum = -1;
@@ -319,8 +312,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
319 ssfdc->erase_size = mtd->erasesize; 312 ssfdc->erase_size = mtd->erasesize;
320 ssfdc->map_len = (u32)mtd->size / mtd->erasesize; 313 ssfdc->map_len = (u32)mtd->size / mtd->erasesize;
321 314
322 DEBUG(MTD_DEBUG_LEVEL1, 315 pr_debug("SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
323 "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
324 ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len, 316 ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len,
325 DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE)); 317 DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE));
326 318
@@ -331,7 +323,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
331 ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) / 323 ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) /
332 ((long)ssfdc->sectors * (long)ssfdc->heads)); 324 ((long)ssfdc->sectors * (long)ssfdc->heads));
333 325
334 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n", 326 pr_debug("SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
335 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors, 327 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors,
336 (long)ssfdc->cylinders * (long)ssfdc->heads * 328 (long)ssfdc->cylinders * (long)ssfdc->heads *
337 (long)ssfdc->sectors); 329 (long)ssfdc->sectors);
@@ -342,11 +334,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
342 /* Allocate logical block map */ 334 /* Allocate logical block map */
343 ssfdc->logic_block_map = kmalloc(sizeof(ssfdc->logic_block_map[0]) * 335 ssfdc->logic_block_map = kmalloc(sizeof(ssfdc->logic_block_map[0]) *
344 ssfdc->map_len, GFP_KERNEL); 336 ssfdc->map_len, GFP_KERNEL);
345 if (!ssfdc->logic_block_map) { 337 if (!ssfdc->logic_block_map)
346 printk(KERN_WARNING
347 "SSFDC_RO: out of memory for data structures\n");
348 goto out_err; 338 goto out_err;
349 }
350 memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) * 339 memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) *
351 ssfdc->map_len); 340 ssfdc->map_len);
352 341
@@ -371,7 +360,7 @@ static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
371{ 360{
372 struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev; 361 struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
373 362
374 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: remove_dev (i=%d)\n", dev->devnum); 363 pr_debug("SSFDC_RO: remove_dev (i=%d)\n", dev->devnum);
375 364
376 del_mtd_blktrans_dev(dev); 365 del_mtd_blktrans_dev(dev);
377 kfree(ssfdc->logic_block_map); 366 kfree(ssfdc->logic_block_map);
@@ -387,8 +376,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
387 offset = (int)(logic_sect_no % sectors_per_block); 376 offset = (int)(logic_sect_no % sectors_per_block);
388 block_address = (int)(logic_sect_no / sectors_per_block); 377 block_address = (int)(logic_sect_no / sectors_per_block);
389 378
390 DEBUG(MTD_DEBUG_LEVEL3, 379 pr_debug("SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d,"
391 "SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d,"
392 " block_addr=%d\n", logic_sect_no, sectors_per_block, offset, 380 " block_addr=%d\n", logic_sect_no, sectors_per_block, offset,
393 block_address); 381 block_address);
394 382
@@ -397,8 +385,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
397 385
398 block_address = ssfdc->logic_block_map[block_address]; 386 block_address = ssfdc->logic_block_map[block_address];
399 387
400 DEBUG(MTD_DEBUG_LEVEL3, 388 pr_debug("SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n",
401 "SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n",
402 block_address); 389 block_address);
403 390
404 if (block_address < 0xffff) { 391 if (block_address < 0xffff) {
@@ -407,8 +394,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
407 sect_no = (unsigned long)block_address * sectors_per_block + 394 sect_no = (unsigned long)block_address * sectors_per_block +
408 offset; 395 offset;
409 396
410 DEBUG(MTD_DEBUG_LEVEL3, 397 pr_debug("SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
411 "SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
412 sect_no); 398 sect_no);
413 399
414 if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0) 400 if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0)
@@ -424,7 +410,7 @@ static int ssfdcr_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
424{ 410{
425 struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev; 411 struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
426 412
427 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n", 413 pr_debug("SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n",
428 ssfdc->cylinders, ssfdc->heads, ssfdc->sectors); 414 ssfdc->cylinders, ssfdc->heads, ssfdc->sectors);
429 415
430 geo->heads = ssfdc->heads; 416 geo->heads = ssfdc->heads;
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index dec92ae6111a..933f7e5f32d3 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -30,7 +30,7 @@
30 30
31#define PRINT_PREF KERN_INFO "mtd_oobtest: " 31#define PRINT_PREF KERN_INFO "mtd_oobtest: "
32 32
33static int dev; 33static int dev = -EINVAL;
34module_param(dev, int, S_IRUGO); 34module_param(dev, int, S_IRUGO);
35MODULE_PARM_DESC(dev, "MTD device number to use"); 35MODULE_PARM_DESC(dev, "MTD device number to use");
36 36
@@ -131,7 +131,7 @@ static int write_eraseblock(int ebnum)
131 131
132 for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { 132 for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
133 set_random_data(writebuf, use_len); 133 set_random_data(writebuf, use_len);
134 ops.mode = MTD_OOB_AUTO; 134 ops.mode = MTD_OPS_AUTO_OOB;
135 ops.len = 0; 135 ops.len = 0;
136 ops.retlen = 0; 136 ops.retlen = 0;
137 ops.ooblen = use_len; 137 ops.ooblen = use_len;
@@ -184,7 +184,7 @@ static int verify_eraseblock(int ebnum)
184 184
185 for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { 185 for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
186 set_random_data(writebuf, use_len); 186 set_random_data(writebuf, use_len);
187 ops.mode = MTD_OOB_AUTO; 187 ops.mode = MTD_OPS_AUTO_OOB;
188 ops.len = 0; 188 ops.len = 0;
189 ops.retlen = 0; 189 ops.retlen = 0;
190 ops.ooblen = use_len; 190 ops.ooblen = use_len;
@@ -211,7 +211,7 @@ static int verify_eraseblock(int ebnum)
211 if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) { 211 if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
212 int k; 212 int k;
213 213
214 ops.mode = MTD_OOB_AUTO; 214 ops.mode = MTD_OPS_AUTO_OOB;
215 ops.len = 0; 215 ops.len = 0;
216 ops.retlen = 0; 216 ops.retlen = 0;
217 ops.ooblen = mtd->ecclayout->oobavail; 217 ops.ooblen = mtd->ecclayout->oobavail;
@@ -276,7 +276,7 @@ static int verify_eraseblock_in_one_go(int ebnum)
276 size_t len = mtd->ecclayout->oobavail * pgcnt; 276 size_t len = mtd->ecclayout->oobavail * pgcnt;
277 277
278 set_random_data(writebuf, len); 278 set_random_data(writebuf, len);
279 ops.mode = MTD_OOB_AUTO; 279 ops.mode = MTD_OPS_AUTO_OOB;
280 ops.len = 0; 280 ops.len = 0;
281 ops.retlen = 0; 281 ops.retlen = 0;
282 ops.ooblen = len; 282 ops.ooblen = len;
@@ -366,6 +366,13 @@ static int __init mtd_oobtest_init(void)
366 366
367 printk(KERN_INFO "\n"); 367 printk(KERN_INFO "\n");
368 printk(KERN_INFO "=================================================\n"); 368 printk(KERN_INFO "=================================================\n");
369
370 if (dev < 0) {
371 printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
372 printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
373 return -EINVAL;
374 }
375
369 printk(PRINT_PREF "MTD device: %d\n", dev); 376 printk(PRINT_PREF "MTD device: %d\n", dev);
370 377
371 mtd = get_mtd_device(NULL, dev); 378 mtd = get_mtd_device(NULL, dev);
@@ -507,7 +514,7 @@ static int __init mtd_oobtest_init(void)
507 addr0 += mtd->erasesize; 514 addr0 += mtd->erasesize;
508 515
509 /* Attempt to write off end of OOB */ 516 /* Attempt to write off end of OOB */
510 ops.mode = MTD_OOB_AUTO; 517 ops.mode = MTD_OPS_AUTO_OOB;
511 ops.len = 0; 518 ops.len = 0;
512 ops.retlen = 0; 519 ops.retlen = 0;
513 ops.ooblen = 1; 520 ops.ooblen = 1;
@@ -527,7 +534,7 @@ static int __init mtd_oobtest_init(void)
527 } 534 }
528 535
529 /* Attempt to read off end of OOB */ 536 /* Attempt to read off end of OOB */
530 ops.mode = MTD_OOB_AUTO; 537 ops.mode = MTD_OPS_AUTO_OOB;
531 ops.len = 0; 538 ops.len = 0;
532 ops.retlen = 0; 539 ops.retlen = 0;
533 ops.ooblen = 1; 540 ops.ooblen = 1;
@@ -551,7 +558,7 @@ static int __init mtd_oobtest_init(void)
551 "block is bad\n"); 558 "block is bad\n");
552 else { 559 else {
553 /* Attempt to write off end of device */ 560 /* Attempt to write off end of device */
554 ops.mode = MTD_OOB_AUTO; 561 ops.mode = MTD_OPS_AUTO_OOB;
555 ops.len = 0; 562 ops.len = 0;
556 ops.retlen = 0; 563 ops.retlen = 0;
557 ops.ooblen = mtd->ecclayout->oobavail + 1; 564 ops.ooblen = mtd->ecclayout->oobavail + 1;
@@ -571,7 +578,7 @@ static int __init mtd_oobtest_init(void)
571 } 578 }
572 579
573 /* Attempt to read off end of device */ 580 /* Attempt to read off end of device */
574 ops.mode = MTD_OOB_AUTO; 581 ops.mode = MTD_OPS_AUTO_OOB;
575 ops.len = 0; 582 ops.len = 0;
576 ops.retlen = 0; 583 ops.retlen = 0;
577 ops.ooblen = mtd->ecclayout->oobavail + 1; 584 ops.ooblen = mtd->ecclayout->oobavail + 1;
@@ -595,7 +602,7 @@ static int __init mtd_oobtest_init(void)
595 goto out; 602 goto out;
596 603
597 /* Attempt to write off end of device */ 604 /* Attempt to write off end of device */
598 ops.mode = MTD_OOB_AUTO; 605 ops.mode = MTD_OPS_AUTO_OOB;
599 ops.len = 0; 606 ops.len = 0;
600 ops.retlen = 0; 607 ops.retlen = 0;
601 ops.ooblen = mtd->ecclayout->oobavail; 608 ops.ooblen = mtd->ecclayout->oobavail;
@@ -615,7 +622,7 @@ static int __init mtd_oobtest_init(void)
615 } 622 }
616 623
617 /* Attempt to read off end of device */ 624 /* Attempt to read off end of device */
618 ops.mode = MTD_OOB_AUTO; 625 ops.mode = MTD_OPS_AUTO_OOB;
619 ops.len = 0; 626 ops.len = 0;
620 ops.retlen = 0; 627 ops.retlen = 0;
621 ops.ooblen = mtd->ecclayout->oobavail; 628 ops.ooblen = mtd->ecclayout->oobavail;
@@ -655,7 +662,7 @@ static int __init mtd_oobtest_init(void)
655 addr = (i + 1) * mtd->erasesize - mtd->writesize; 662 addr = (i + 1) * mtd->erasesize - mtd->writesize;
656 for (pg = 0; pg < cnt; ++pg) { 663 for (pg = 0; pg < cnt; ++pg) {
657 set_random_data(writebuf, sz); 664 set_random_data(writebuf, sz);
658 ops.mode = MTD_OOB_AUTO; 665 ops.mode = MTD_OPS_AUTO_OOB;
659 ops.len = 0; 666 ops.len = 0;
660 ops.retlen = 0; 667 ops.retlen = 0;
661 ops.ooblen = sz; 668 ops.ooblen = sz;
@@ -683,7 +690,7 @@ static int __init mtd_oobtest_init(void)
683 continue; 690 continue;
684 set_random_data(writebuf, mtd->ecclayout->oobavail * 2); 691 set_random_data(writebuf, mtd->ecclayout->oobavail * 2);
685 addr = (i + 1) * mtd->erasesize - mtd->writesize; 692 addr = (i + 1) * mtd->erasesize - mtd->writesize;
686 ops.mode = MTD_OOB_AUTO; 693 ops.mode = MTD_OPS_AUTO_OOB;
687 ops.len = 0; 694 ops.len = 0;
688 ops.retlen = 0; 695 ops.retlen = 0;
689 ops.ooblen = mtd->ecclayout->oobavail * 2; 696 ops.ooblen = mtd->ecclayout->oobavail * 2;
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 00b937e38c1d..afafb6935fd0 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -30,7 +30,7 @@
30 30
31#define PRINT_PREF KERN_INFO "mtd_pagetest: " 31#define PRINT_PREF KERN_INFO "mtd_pagetest: "
32 32
33static int dev; 33static int dev = -EINVAL;
34module_param(dev, int, S_IRUGO); 34module_param(dev, int, S_IRUGO);
35MODULE_PARM_DESC(dev, "MTD device number to use"); 35MODULE_PARM_DESC(dev, "MTD device number to use");
36 36
@@ -128,7 +128,7 @@ static int verify_eraseblock(int ebnum)
128 for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) { 128 for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
129 /* Do a read to set the internal dataRAMs to different data */ 129 /* Do a read to set the internal dataRAMs to different data */
130 err = mtd->read(mtd, addr0, bufsize, &read, twopages); 130 err = mtd->read(mtd, addr0, bufsize, &read, twopages);
131 if (err == -EUCLEAN) 131 if (mtd_is_bitflip(err))
132 err = 0; 132 err = 0;
133 if (err || read != bufsize) { 133 if (err || read != bufsize) {
134 printk(PRINT_PREF "error: read failed at %#llx\n", 134 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -136,7 +136,7 @@ static int verify_eraseblock(int ebnum)
136 return err; 136 return err;
137 } 137 }
138 err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages); 138 err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
139 if (err == -EUCLEAN) 139 if (mtd_is_bitflip(err))
140 err = 0; 140 err = 0;
141 if (err || read != bufsize) { 141 if (err || read != bufsize) {
142 printk(PRINT_PREF "error: read failed at %#llx\n", 142 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -146,7 +146,7 @@ static int verify_eraseblock(int ebnum)
146 memset(twopages, 0, bufsize); 146 memset(twopages, 0, bufsize);
147 read = 0; 147 read = 0;
148 err = mtd->read(mtd, addr, bufsize, &read, twopages); 148 err = mtd->read(mtd, addr, bufsize, &read, twopages);
149 if (err == -EUCLEAN) 149 if (mtd_is_bitflip(err))
150 err = 0; 150 err = 0;
151 if (err || read != bufsize) { 151 if (err || read != bufsize) {
152 printk(PRINT_PREF "error: read failed at %#llx\n", 152 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -164,7 +164,7 @@ static int verify_eraseblock(int ebnum)
164 unsigned long oldnext = next; 164 unsigned long oldnext = next;
165 /* Do a read to set the internal dataRAMs to different data */ 165 /* Do a read to set the internal dataRAMs to different data */
166 err = mtd->read(mtd, addr0, bufsize, &read, twopages); 166 err = mtd->read(mtd, addr0, bufsize, &read, twopages);
167 if (err == -EUCLEAN) 167 if (mtd_is_bitflip(err))
168 err = 0; 168 err = 0;
169 if (err || read != bufsize) { 169 if (err || read != bufsize) {
170 printk(PRINT_PREF "error: read failed at %#llx\n", 170 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -172,7 +172,7 @@ static int verify_eraseblock(int ebnum)
172 return err; 172 return err;
173 } 173 }
174 err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages); 174 err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
175 if (err == -EUCLEAN) 175 if (mtd_is_bitflip(err))
176 err = 0; 176 err = 0;
177 if (err || read != bufsize) { 177 if (err || read != bufsize) {
178 printk(PRINT_PREF "error: read failed at %#llx\n", 178 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -182,7 +182,7 @@ static int verify_eraseblock(int ebnum)
182 memset(twopages, 0, bufsize); 182 memset(twopages, 0, bufsize);
183 read = 0; 183 read = 0;
184 err = mtd->read(mtd, addr, bufsize, &read, twopages); 184 err = mtd->read(mtd, addr, bufsize, &read, twopages);
185 if (err == -EUCLEAN) 185 if (mtd_is_bitflip(err))
186 err = 0; 186 err = 0;
187 if (err || read != bufsize) { 187 if (err || read != bufsize) {
188 printk(PRINT_PREF "error: read failed at %#llx\n", 188 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -231,7 +231,7 @@ static int crosstest(void)
231 read = 0; 231 read = 0;
232 addr = addrn - pgsize - pgsize; 232 addr = addrn - pgsize - pgsize;
233 err = mtd->read(mtd, addr, pgsize, &read, pp1); 233 err = mtd->read(mtd, addr, pgsize, &read, pp1);
234 if (err == -EUCLEAN) 234 if (mtd_is_bitflip(err))
235 err = 0; 235 err = 0;
236 if (err || read != pgsize) { 236 if (err || read != pgsize) {
237 printk(PRINT_PREF "error: read failed at %#llx\n", 237 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -244,7 +244,7 @@ static int crosstest(void)
244 read = 0; 244 read = 0;
245 addr = addrn - pgsize - pgsize - pgsize; 245 addr = addrn - pgsize - pgsize - pgsize;
246 err = mtd->read(mtd, addr, pgsize, &read, pp1); 246 err = mtd->read(mtd, addr, pgsize, &read, pp1);
247 if (err == -EUCLEAN) 247 if (mtd_is_bitflip(err))
248 err = 0; 248 err = 0;
249 if (err || read != pgsize) { 249 if (err || read != pgsize) {
250 printk(PRINT_PREF "error: read failed at %#llx\n", 250 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -258,7 +258,7 @@ static int crosstest(void)
258 addr = addr0; 258 addr = addr0;
259 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 259 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
260 err = mtd->read(mtd, addr, pgsize, &read, pp2); 260 err = mtd->read(mtd, addr, pgsize, &read, pp2);
261 if (err == -EUCLEAN) 261 if (mtd_is_bitflip(err))
262 err = 0; 262 err = 0;
263 if (err || read != pgsize) { 263 if (err || read != pgsize) {
264 printk(PRINT_PREF "error: read failed at %#llx\n", 264 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -272,7 +272,7 @@ static int crosstest(void)
272 addr = addrn - pgsize; 272 addr = addrn - pgsize;
273 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 273 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
274 err = mtd->read(mtd, addr, pgsize, &read, pp3); 274 err = mtd->read(mtd, addr, pgsize, &read, pp3);
275 if (err == -EUCLEAN) 275 if (mtd_is_bitflip(err))
276 err = 0; 276 err = 0;
277 if (err || read != pgsize) { 277 if (err || read != pgsize) {
278 printk(PRINT_PREF "error: read failed at %#llx\n", 278 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -286,7 +286,7 @@ static int crosstest(void)
286 addr = addr0; 286 addr = addr0;
287 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 287 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
288 err = mtd->read(mtd, addr, pgsize, &read, pp4); 288 err = mtd->read(mtd, addr, pgsize, &read, pp4);
289 if (err == -EUCLEAN) 289 if (mtd_is_bitflip(err))
290 err = 0; 290 err = 0;
291 if (err || read != pgsize) { 291 if (err || read != pgsize) {
292 printk(PRINT_PREF "error: read failed at %#llx\n", 292 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -345,7 +345,7 @@ static int erasecrosstest(void)
345 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 345 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
346 memset(readbuf, 0, pgsize); 346 memset(readbuf, 0, pgsize);
347 err = mtd->read(mtd, addr0, pgsize, &read, readbuf); 347 err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
348 if (err == -EUCLEAN) 348 if (mtd_is_bitflip(err))
349 err = 0; 349 err = 0;
350 if (err || read != pgsize) { 350 if (err || read != pgsize) {
351 printk(PRINT_PREF "error: read failed at %#llx\n", 351 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -383,7 +383,7 @@ static int erasecrosstest(void)
383 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 383 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
384 memset(readbuf, 0, pgsize); 384 memset(readbuf, 0, pgsize);
385 err = mtd->read(mtd, addr0, pgsize, &read, readbuf); 385 err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
386 if (err == -EUCLEAN) 386 if (mtd_is_bitflip(err))
387 err = 0; 387 err = 0;
388 if (err || read != pgsize) { 388 if (err || read != pgsize) {
389 printk(PRINT_PREF "error: read failed at %#llx\n", 389 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -439,7 +439,7 @@ static int erasetest(void)
439 439
440 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 440 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
441 err = mtd->read(mtd, addr0, pgsize, &read, twopages); 441 err = mtd->read(mtd, addr0, pgsize, &read, twopages);
442 if (err == -EUCLEAN) 442 if (mtd_is_bitflip(err))
443 err = 0; 443 err = 0;
444 if (err || read != pgsize) { 444 if (err || read != pgsize) {
445 printk(PRINT_PREF "error: read failed at %#llx\n", 445 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -504,6 +504,13 @@ static int __init mtd_pagetest_init(void)
504 504
505 printk(KERN_INFO "\n"); 505 printk(KERN_INFO "\n");
506 printk(KERN_INFO "=================================================\n"); 506 printk(KERN_INFO "=================================================\n");
507
508 if (dev < 0) {
509 printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
510 printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
511 return -EINVAL;
512 }
513
507 printk(PRINT_PREF "MTD device: %d\n", dev); 514 printk(PRINT_PREF "MTD device: %d\n", dev);
508 515
509 mtd = get_mtd_device(NULL, dev); 516 mtd = get_mtd_device(NULL, dev);
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index afe71aa15c4b..550fe51225a7 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -29,7 +29,7 @@
29 29
30#define PRINT_PREF KERN_INFO "mtd_readtest: " 30#define PRINT_PREF KERN_INFO "mtd_readtest: "
31 31
32static int dev; 32static int dev = -EINVAL;
33module_param(dev, int, S_IRUGO); 33module_param(dev, int, S_IRUGO);
34MODULE_PARM_DESC(dev, "MTD device number to use"); 34MODULE_PARM_DESC(dev, "MTD device number to use");
35 35
@@ -66,7 +66,7 @@ static int read_eraseblock_by_page(int ebnum)
66 if (mtd->oobsize) { 66 if (mtd->oobsize) {
67 struct mtd_oob_ops ops; 67 struct mtd_oob_ops ops;
68 68
69 ops.mode = MTD_OOB_PLACE; 69 ops.mode = MTD_OPS_PLACE_OOB;
70 ops.len = 0; 70 ops.len = 0;
71 ops.retlen = 0; 71 ops.retlen = 0;
72 ops.ooblen = mtd->oobsize; 72 ops.ooblen = mtd->oobsize;
@@ -75,7 +75,8 @@ static int read_eraseblock_by_page(int ebnum)
75 ops.datbuf = NULL; 75 ops.datbuf = NULL;
76 ops.oobbuf = oobbuf; 76 ops.oobbuf = oobbuf;
77 ret = mtd->read_oob(mtd, addr, &ops); 77 ret = mtd->read_oob(mtd, addr, &ops);
78 if (ret || ops.oobretlen != mtd->oobsize) { 78 if ((ret && !mtd_is_bitflip(ret)) ||
79 ops.oobretlen != mtd->oobsize) {
79 printk(PRINT_PREF "error: read oob failed at " 80 printk(PRINT_PREF "error: read oob failed at "
80 "%#llx\n", (long long)addr); 81 "%#llx\n", (long long)addr);
81 if (!err) 82 if (!err)
@@ -169,6 +170,12 @@ static int __init mtd_readtest_init(void)
169 170
170 printk(KERN_INFO "\n"); 171 printk(KERN_INFO "\n");
171 printk(KERN_INFO "=================================================\n"); 172 printk(KERN_INFO "=================================================\n");
173
174 if (dev < 0) {
175 printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
176 return -EINVAL;
177 }
178
172 printk(PRINT_PREF "MTD device: %d\n", dev); 179 printk(PRINT_PREF "MTD device: %d\n", dev);
173 180
174 mtd = get_mtd_device(NULL, dev); 181 mtd = get_mtd_device(NULL, dev);
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 627d4e2466a3..493b367bdd35 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -29,7 +29,7 @@
29 29
30#define PRINT_PREF KERN_INFO "mtd_speedtest: " 30#define PRINT_PREF KERN_INFO "mtd_speedtest: "
31 31
32static int dev; 32static int dev = -EINVAL;
33module_param(dev, int, S_IRUGO); 33module_param(dev, int, S_IRUGO);
34MODULE_PARM_DESC(dev, "MTD device number to use"); 34MODULE_PARM_DESC(dev, "MTD device number to use");
35 35
@@ -216,7 +216,7 @@ static int read_eraseblock(int ebnum)
216 216
217 err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf); 217 err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf);
218 /* Ignore corrected ECC errors */ 218 /* Ignore corrected ECC errors */
219 if (err == -EUCLEAN) 219 if (mtd_is_bitflip(err))
220 err = 0; 220 err = 0;
221 if (err || read != mtd->erasesize) { 221 if (err || read != mtd->erasesize) {
222 printk(PRINT_PREF "error: read failed at %#llx\n", addr); 222 printk(PRINT_PREF "error: read failed at %#llx\n", addr);
@@ -237,7 +237,7 @@ static int read_eraseblock_by_page(int ebnum)
237 for (i = 0; i < pgcnt; i++) { 237 for (i = 0; i < pgcnt; i++) {
238 err = mtd->read(mtd, addr, pgsize, &read, buf); 238 err = mtd->read(mtd, addr, pgsize, &read, buf);
239 /* Ignore corrected ECC errors */ 239 /* Ignore corrected ECC errors */
240 if (err == -EUCLEAN) 240 if (mtd_is_bitflip(err))
241 err = 0; 241 err = 0;
242 if (err || read != pgsize) { 242 if (err || read != pgsize) {
243 printk(PRINT_PREF "error: read failed at %#llx\n", 243 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -263,7 +263,7 @@ static int read_eraseblock_by_2pages(int ebnum)
263 for (i = 0; i < n; i++) { 263 for (i = 0; i < n; i++) {
264 err = mtd->read(mtd, addr, sz, &read, buf); 264 err = mtd->read(mtd, addr, sz, &read, buf);
265 /* Ignore corrected ECC errors */ 265 /* Ignore corrected ECC errors */
266 if (err == -EUCLEAN) 266 if (mtd_is_bitflip(err))
267 err = 0; 267 err = 0;
268 if (err || read != sz) { 268 if (err || read != sz) {
269 printk(PRINT_PREF "error: read failed at %#llx\n", 269 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -278,7 +278,7 @@ static int read_eraseblock_by_2pages(int ebnum)
278 if (pgcnt % 2) { 278 if (pgcnt % 2) {
279 err = mtd->read(mtd, addr, pgsize, &read, buf); 279 err = mtd->read(mtd, addr, pgsize, &read, buf);
280 /* Ignore corrected ECC errors */ 280 /* Ignore corrected ECC errors */
281 if (err == -EUCLEAN) 281 if (mtd_is_bitflip(err))
282 err = 0; 282 err = 0;
283 if (err || read != pgsize) { 283 if (err || read != pgsize) {
284 printk(PRINT_PREF "error: read failed at %#llx\n", 284 printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -361,6 +361,13 @@ static int __init mtd_speedtest_init(void)
361 361
362 printk(KERN_INFO "\n"); 362 printk(KERN_INFO "\n");
363 printk(KERN_INFO "=================================================\n"); 363 printk(KERN_INFO "=================================================\n");
364
365 if (dev < 0) {
366 printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
367 printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
368 return -EINVAL;
369 }
370
364 if (count) 371 if (count)
365 printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count); 372 printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count);
366 else 373 else
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 531625fc9259..52ffd9120e0d 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -30,7 +30,7 @@
30 30
31#define PRINT_PREF KERN_INFO "mtd_stresstest: " 31#define PRINT_PREF KERN_INFO "mtd_stresstest: "
32 32
33static int dev; 33static int dev = -EINVAL;
34module_param(dev, int, S_IRUGO); 34module_param(dev, int, S_IRUGO);
35MODULE_PARM_DESC(dev, "MTD device number to use"); 35MODULE_PARM_DESC(dev, "MTD device number to use");
36 36
@@ -154,7 +154,7 @@ static int do_read(void)
154 } 154 }
155 addr = eb * mtd->erasesize + offs; 155 addr = eb * mtd->erasesize + offs;
156 err = mtd->read(mtd, addr, len, &read, readbuf); 156 err = mtd->read(mtd, addr, len, &read, readbuf);
157 if (err == -EUCLEAN) 157 if (mtd_is_bitflip(err))
158 err = 0; 158 err = 0;
159 if (unlikely(err || read != len)) { 159 if (unlikely(err || read != len)) {
160 printk(PRINT_PREF "error: read failed at 0x%llx\n", 160 printk(PRINT_PREF "error: read failed at 0x%llx\n",
@@ -250,6 +250,13 @@ static int __init mtd_stresstest_init(void)
250 250
251 printk(KERN_INFO "\n"); 251 printk(KERN_INFO "\n");
252 printk(KERN_INFO "=================================================\n"); 252 printk(KERN_INFO "=================================================\n");
253
254 if (dev < 0) {
255 printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
256 printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
257 return -EINVAL;
258 }
259
253 printk(PRINT_PREF "MTD device: %d\n", dev); 260 printk(PRINT_PREF "MTD device: %d\n", dev);
254 261
255 mtd = get_mtd_device(NULL, dev); 262 mtd = get_mtd_device(NULL, dev);
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index 334eae53a3db..1a05bfac4eee 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -29,7 +29,7 @@
29 29
30#define PRINT_PREF KERN_INFO "mtd_subpagetest: " 30#define PRINT_PREF KERN_INFO "mtd_subpagetest: "
31 31
32static int dev; 32static int dev = -EINVAL;
33module_param(dev, int, S_IRUGO); 33module_param(dev, int, S_IRUGO);
34MODULE_PARM_DESC(dev, "MTD device number to use"); 34MODULE_PARM_DESC(dev, "MTD device number to use");
35 35
@@ -198,7 +198,7 @@ static int verify_eraseblock(int ebnum)
198 read = 0; 198 read = 0;
199 err = mtd->read(mtd, addr, subpgsize, &read, readbuf); 199 err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
200 if (unlikely(err || read != subpgsize)) { 200 if (unlikely(err || read != subpgsize)) {
201 if (err == -EUCLEAN && read == subpgsize) { 201 if (mtd_is_bitflip(err) && read == subpgsize) {
202 printk(PRINT_PREF "ECC correction at %#llx\n", 202 printk(PRINT_PREF "ECC correction at %#llx\n",
203 (long long)addr); 203 (long long)addr);
204 err = 0; 204 err = 0;
@@ -226,7 +226,7 @@ static int verify_eraseblock(int ebnum)
226 read = 0; 226 read = 0;
227 err = mtd->read(mtd, addr, subpgsize, &read, readbuf); 227 err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
228 if (unlikely(err || read != subpgsize)) { 228 if (unlikely(err || read != subpgsize)) {
229 if (err == -EUCLEAN && read == subpgsize) { 229 if (mtd_is_bitflip(err) && read == subpgsize) {
230 printk(PRINT_PREF "ECC correction at %#llx\n", 230 printk(PRINT_PREF "ECC correction at %#llx\n",
231 (long long)addr); 231 (long long)addr);
232 err = 0; 232 err = 0;
@@ -264,7 +264,7 @@ static int verify_eraseblock2(int ebnum)
264 read = 0; 264 read = 0;
265 err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf); 265 err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf);
266 if (unlikely(err || read != subpgsize * k)) { 266 if (unlikely(err || read != subpgsize * k)) {
267 if (err == -EUCLEAN && read == subpgsize * k) { 267 if (mtd_is_bitflip(err) && read == subpgsize * k) {
268 printk(PRINT_PREF "ECC correction at %#llx\n", 268 printk(PRINT_PREF "ECC correction at %#llx\n",
269 (long long)addr); 269 (long long)addr);
270 err = 0; 270 err = 0;
@@ -298,7 +298,7 @@ static int verify_eraseblock_ff(int ebnum)
298 read = 0; 298 read = 0;
299 err = mtd->read(mtd, addr, subpgsize, &read, readbuf); 299 err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
300 if (unlikely(err || read != subpgsize)) { 300 if (unlikely(err || read != subpgsize)) {
301 if (err == -EUCLEAN && read == subpgsize) { 301 if (mtd_is_bitflip(err) && read == subpgsize) {
302 printk(PRINT_PREF "ECC correction at %#llx\n", 302 printk(PRINT_PREF "ECC correction at %#llx\n",
303 (long long)addr); 303 (long long)addr);
304 err = 0; 304 err = 0;
@@ -379,6 +379,13 @@ static int __init mtd_subpagetest_init(void)
379 379
380 printk(KERN_INFO "\n"); 380 printk(KERN_INFO "\n");
381 printk(KERN_INFO "=================================================\n"); 381 printk(KERN_INFO "=================================================\n");
382
383 if (dev < 0) {
384 printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
385 printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
386 return -EINVAL;
387 }
388
382 printk(PRINT_PREF "MTD device: %d\n", dev); 389 printk(PRINT_PREF "MTD device: %d\n", dev);
383 390
384 mtd = get_mtd_device(NULL, dev); 391 mtd = get_mtd_device(NULL, dev);
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index 5c6c3d248901..03ab649a6964 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -46,7 +46,7 @@ static int pgcnt;
46module_param(pgcnt, int, S_IRUGO); 46module_param(pgcnt, int, S_IRUGO);
47MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)"); 47MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)");
48 48
49static int dev; 49static int dev = -EINVAL;
50module_param(dev, int, S_IRUGO); 50module_param(dev, int, S_IRUGO);
51MODULE_PARM_DESC(dev, "MTD device number to use"); 51MODULE_PARM_DESC(dev, "MTD device number to use");
52 52
@@ -138,7 +138,7 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf)
138 138
139retry: 139retry:
140 err = mtd->read(mtd, addr, len, &read, check_buf); 140 err = mtd->read(mtd, addr, len, &read, check_buf);
141 if (err == -EUCLEAN) 141 if (mtd_is_bitflip(err))
142 printk(PRINT_PREF "single bit flip occurred at EB %d " 142 printk(PRINT_PREF "single bit flip occurred at EB %d "
143 "MTD reported that it was fixed.\n", ebnum); 143 "MTD reported that it was fixed.\n", ebnum);
144 else if (err) { 144 else if (err) {
@@ -213,6 +213,13 @@ static int __init tort_init(void)
213 printk(KERN_INFO "=================================================\n"); 213 printk(KERN_INFO "=================================================\n");
214 printk(PRINT_PREF "Warning: this program is trying to wear out your " 214 printk(PRINT_PREF "Warning: this program is trying to wear out your "
215 "flash, stop it if this is not wanted.\n"); 215 "flash, stop it if this is not wanted.\n");
216
217 if (dev < 0) {
218 printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
219 printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
220 return -EINVAL;
221 }
222
216 printk(PRINT_PREF "MTD device: %d\n", dev); 223 printk(PRINT_PREF "MTD device: %d\n", dev);
217 printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n", 224 printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n",
218 ebcnt, eb, eb + ebcnt - 1, dev); 225 ebcnt, eb, eb + ebcnt - 1, dev);
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 4be671815014..fb7f19b62d91 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -443,7 +443,7 @@ retry:
443 if (err == UBI_IO_BITFLIPS) { 443 if (err == UBI_IO_BITFLIPS) {
444 scrub = 1; 444 scrub = 1;
445 err = 0; 445 err = 0;
446 } else if (err == -EBADMSG) { 446 } else if (mtd_is_eccerr(err)) {
447 if (vol->vol_type == UBI_DYNAMIC_VOLUME) 447 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
448 goto out_unlock; 448 goto out_unlock;
449 scrub = 1; 449 scrub = 1;
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 6ba55c235873..f20b6f22f240 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -172,9 +172,9 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
172retry: 172retry:
173 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); 173 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
174 if (err) { 174 if (err) {
175 const char *errstr = (err == -EBADMSG) ? " (ECC error)" : ""; 175 const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
176 176
177 if (err == -EUCLEAN) { 177 if (mtd_is_bitflip(err)) {
178 /* 178 /*
179 * -EUCLEAN is reported if there was a bit-flip which 179 * -EUCLEAN is reported if there was a bit-flip which
180 * was corrected, so this is harmless. 180 * was corrected, so this is harmless.
@@ -205,7 +205,7 @@ retry:
205 * all the requested data. But some buggy drivers might do 205 * all the requested data. But some buggy drivers might do
206 * this, so we change it to -EIO. 206 * this, so we change it to -EIO.
207 */ 207 */
208 if (read != len && err == -EBADMSG) { 208 if (read != len && mtd_is_eccerr(err)) {
209 ubi_assert(0); 209 ubi_assert(0);
210 err = -EIO; 210 err = -EIO;
211 } 211 }
@@ -469,7 +469,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
469 469
470out: 470out:
471 mutex_unlock(&ubi->buf_mutex); 471 mutex_unlock(&ubi->buf_mutex);
472 if (err == UBI_IO_BITFLIPS || err == -EBADMSG) { 472 if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
473 /* 473 /*
474 * If a bit-flip or data integrity error was detected, the test 474 * If a bit-flip or data integrity error was detected, the test
475 * has not passed because it happened on a freshly erased 475 * has not passed because it happened on a freshly erased
@@ -760,7 +760,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
760 760
761 read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); 761 read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
762 if (read_err) { 762 if (read_err) {
763 if (read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG) 763 if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
764 return read_err; 764 return read_err;
765 765
766 /* 766 /*
@@ -776,7 +776,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
776 776
777 magic = be32_to_cpu(ec_hdr->magic); 777 magic = be32_to_cpu(ec_hdr->magic);
778 if (magic != UBI_EC_HDR_MAGIC) { 778 if (magic != UBI_EC_HDR_MAGIC) {
779 if (read_err == -EBADMSG) 779 if (mtd_is_eccerr(read_err))
780 return UBI_IO_BAD_HDR_EBADMSG; 780 return UBI_IO_BAD_HDR_EBADMSG;
781 781
782 /* 782 /*
@@ -1032,12 +1032,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
1032 p = (char *)vid_hdr - ubi->vid_hdr_shift; 1032 p = (char *)vid_hdr - ubi->vid_hdr_shift;
1033 read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, 1033 read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
1034 ubi->vid_hdr_alsize); 1034 ubi->vid_hdr_alsize);
1035 if (read_err && read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG) 1035 if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
1036 return read_err; 1036 return read_err;
1037 1037
1038 magic = be32_to_cpu(vid_hdr->magic); 1038 magic = be32_to_cpu(vid_hdr->magic);
1039 if (magic != UBI_VID_HDR_MAGIC) { 1039 if (magic != UBI_VID_HDR_MAGIC) {
1040 if (read_err == -EBADMSG) 1040 if (mtd_is_eccerr(read_err))
1041 return UBI_IO_BAD_HDR_EBADMSG; 1041 return UBI_IO_BAD_HDR_EBADMSG;
1042 1042
1043 if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { 1043 if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
@@ -1219,7 +1219,7 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
1219 return -ENOMEM; 1219 return -ENOMEM;
1220 1220
1221 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); 1221 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
1222 if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) 1222 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
1223 goto exit; 1223 goto exit;
1224 1224
1225 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); 1225 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
@@ -1306,7 +1306,7 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
1306 p = (char *)vid_hdr - ubi->vid_hdr_shift; 1306 p = (char *)vid_hdr - ubi->vid_hdr_shift;
1307 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, 1307 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
1308 ubi->vid_hdr_alsize); 1308 ubi->vid_hdr_alsize);
1309 if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) 1309 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
1310 goto exit; 1310 goto exit;
1311 1311
1312 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); 1312 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
@@ -1358,7 +1358,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
1358 } 1358 }
1359 1359
1360 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1); 1360 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1);
1361 if (err && err != -EUCLEAN) 1361 if (err && !mtd_is_bitflip(err))
1362 goto out_free; 1362 goto out_free;
1363 1363
1364 for (i = 0; i < len; i++) { 1364 for (i = 0; i < len; i++) {
@@ -1422,7 +1422,7 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
1422 } 1422 }
1423 1423
1424 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); 1424 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
1425 if (err && err != -EUCLEAN) { 1425 if (err && !mtd_is_bitflip(err)) {
1426 ubi_err("error %d while reading %d bytes from PEB %d:%d, " 1426 ubi_err("error %d while reading %d bytes from PEB %d:%d, "
1427 "read %zd bytes", err, len, pnum, offset, read); 1427 "read %zd bytes", err, len, pnum, offset, read);
1428 goto error; 1428 goto error;
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index d39716e5b204..1a35fc5e3b40 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -410,7 +410,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
410 return 0; 410 return 0;
411 411
412 err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check); 412 err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
413 if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) { 413 if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
414 ubi_warn("mark volume %d as corrupted", vol_id); 414 ubi_warn("mark volume %d as corrupted", vol_id);
415 vol->corrupted = 1; 415 vol->corrupted = 1;
416 } 416 }
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index ff2a65c37f69..f6a7d7ac4b98 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -81,7 +81,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
81 81
82 err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1); 82 err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
83 if (err) { 83 if (err) {
84 if (err == -EBADMSG) 84 if (mtd_is_eccerr(err))
85 err = 1; 85 err = 1;
86 break; 86 break;
87 } 87 }
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index a3a198f9b98d..0cb17d936b5a 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -395,7 +395,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
395 } 395 }
396 396
397 err = ubi_io_read_data(ubi, buf, pnum, 0, len); 397 err = ubi_io_read_data(ubi, buf, pnum, 0, len);
398 if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) 398 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
399 goto out_free_buf; 399 goto out_free_buf;
400 400
401 data_crc = be32_to_cpu(vid_hdr->data_crc); 401 data_crc = be32_to_cpu(vid_hdr->data_crc);
@@ -793,7 +793,7 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
793 793
794 err = ubi_io_read(ubi, ubi->peb_buf1, pnum, ubi->leb_start, 794 err = ubi_io_read(ubi, ubi->peb_buf1, pnum, ubi->leb_start,
795 ubi->leb_size); 795 ubi->leb_size);
796 if (err == UBI_IO_BITFLIPS || err == -EBADMSG) { 796 if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
797 /* 797 /*
798 * Bit-flips or integrity errors while reading the data area. 798 * Bit-flips or integrity errors while reading the data area.
799 * It is difficult to say for sure what type of corruption is 799 * It is difficult to say for sure what type of corruption is
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 4b50a3029b84..9ad18da1891d 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -423,7 +423,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
423 423
424 err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0, 424 err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
425 ubi->vtbl_size); 425 ubi->vtbl_size);
426 if (err == UBI_IO_BITFLIPS || err == -EBADMSG) 426 if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
427 /* 427 /*
428 * Scrub the PEB later. Note, -EBADMSG indicates an 428 * Scrub the PEB later. Note, -EBADMSG indicates an
429 * uncorrectable ECC error, but we have our own CRC and 429 * uncorrectable ECC error, but we have our own CRC and
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b2b9109b6712..b0c577256487 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -560,8 +560,8 @@ static int bond_update_speed_duplex(struct slave *slave)
560 u32 slave_speed; 560 u32 slave_speed;
561 int res; 561 int res;
562 562
563 slave->speed = -1; 563 slave->speed = SPEED_UNKNOWN;
564 slave->duplex = -1; 564 slave->duplex = DUPLEX_UNKNOWN;
565 565
566 res = __ethtool_get_settings(slave_dev, &ecmd); 566 res = __ethtool_get_settings(slave_dev, &ecmd);
567 if (res < 0) 567 if (res < 0)
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 2acf0b080169..ad284baafe87 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -158,12 +158,12 @@ static void bond_info_show_slave(struct seq_file *seq,
158 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); 158 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
159 seq_printf(seq, "MII Status: %s\n", 159 seq_printf(seq, "MII Status: %s\n",
160 (slave->link == BOND_LINK_UP) ? "up" : "down"); 160 (slave->link == BOND_LINK_UP) ? "up" : "down");
161 if (slave->speed == -1) 161 if (slave->speed == SPEED_UNKNOWN)
162 seq_printf(seq, "Speed: %s\n", "Unknown"); 162 seq_printf(seq, "Speed: %s\n", "Unknown");
163 else 163 else
164 seq_printf(seq, "Speed: %d Mbps\n", slave->speed); 164 seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
165 165
166 if (slave->duplex == -1) 166 if (slave->duplex == DUPLEX_UNKNOWN)
167 seq_printf(seq, "Duplex: %s\n", "Unknown"); 167 seq_printf(seq, "Duplex: %s\n", "Unknown");
168 else 168 else
169 seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half"); 169 seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index be5dde040261..94b7f287d6c5 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
10obj-$(CONFIG_NET_VENDOR_AMD) += amd/ 10obj-$(CONFIG_NET_VENDOR_AMD) += amd/
11obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ 11obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
12obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ 12obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
13obj-$(CONFIG_NET_ATMEL) += cadence/ 13obj-$(CONFIG_NET_CADENCE) += cadence/
14obj-$(CONFIG_NET_BFIN) += adi/ 14obj-$(CONFIG_NET_BFIN) += adi/
15obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ 15obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
16obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ 16obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 161cbbb4814a..bf4074167d6a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
89 89
90#define DRV_MODULE_NAME "tg3" 90#define DRV_MODULE_NAME "tg3"
91#define TG3_MAJ_NUM 3 91#define TG3_MAJ_NUM 3
92#define TG3_MIN_NUM 120 92#define TG3_MIN_NUM 121
93#define DRV_MODULE_VERSION \ 93#define DRV_MODULE_VERSION \
94 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 94 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95#define DRV_MODULE_RELDATE "August 18, 2011" 95#define DRV_MODULE_RELDATE "November 2, 2011"
96 96
97#define RESET_KIND_SHUTDOWN 0 97#define RESET_KIND_SHUTDOWN 0
98#define RESET_KIND_INIT 1 98#define RESET_KIND_INIT 1
@@ -628,19 +628,23 @@ static void tg3_ape_lock_init(struct tg3 *tp)
628 regbase = TG3_APE_PER_LOCK_GRANT; 628 regbase = TG3_APE_PER_LOCK_GRANT;
629 629
630 /* Make sure the driver hasn't any stale locks. */ 630 /* Make sure the driver hasn't any stale locks. */
631 for (i = 0; i < 8; i++) { 631 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
632 if (i == TG3_APE_LOCK_GPIO) 632 switch (i) {
633 continue; 633 case TG3_APE_LOCK_PHY0:
634 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER); 634 case TG3_APE_LOCK_PHY1:
635 case TG3_APE_LOCK_PHY2:
636 case TG3_APE_LOCK_PHY3:
637 bit = APE_LOCK_GRANT_DRIVER;
638 break;
639 default:
640 if (!tp->pci_fn)
641 bit = APE_LOCK_GRANT_DRIVER;
642 else
643 bit = 1 << tp->pci_fn;
644 }
645 tg3_ape_write32(tp, regbase + 4 * i, bit);
635 } 646 }
636 647
637 /* Clear the correct bit of the GPIO lock too. */
638 if (!tp->pci_fn)
639 bit = APE_LOCK_GRANT_DRIVER;
640 else
641 bit = 1 << tp->pci_fn;
642
643 tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
644} 648}
645 649
646static int tg3_ape_lock(struct tg3 *tp, int locknum) 650static int tg3_ape_lock(struct tg3 *tp, int locknum)
@@ -658,6 +662,10 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
658 return 0; 662 return 0;
659 case TG3_APE_LOCK_GRC: 663 case TG3_APE_LOCK_GRC:
660 case TG3_APE_LOCK_MEM: 664 case TG3_APE_LOCK_MEM:
665 if (!tp->pci_fn)
666 bit = APE_LOCK_REQ_DRIVER;
667 else
668 bit = 1 << tp->pci_fn;
661 break; 669 break;
662 default: 670 default:
663 return -EINVAL; 671 return -EINVAL;
@@ -673,11 +681,6 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
673 681
674 off = 4 * locknum; 682 off = 4 * locknum;
675 683
676 if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
677 bit = APE_LOCK_REQ_DRIVER;
678 else
679 bit = 1 << tp->pci_fn;
680
681 tg3_ape_write32(tp, req + off, bit); 684 tg3_ape_write32(tp, req + off, bit);
682 685
683 /* Wait for up to 1 millisecond to acquire lock. */ 686 /* Wait for up to 1 millisecond to acquire lock. */
@@ -710,6 +713,10 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
710 return; 713 return;
711 case TG3_APE_LOCK_GRC: 714 case TG3_APE_LOCK_GRC:
712 case TG3_APE_LOCK_MEM: 715 case TG3_APE_LOCK_MEM:
716 if (!tp->pci_fn)
717 bit = APE_LOCK_GRANT_DRIVER;
718 else
719 bit = 1 << tp->pci_fn;
713 break; 720 break;
714 default: 721 default:
715 return; 722 return;
@@ -720,11 +727,6 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
720 else 727 else
721 gnt = TG3_APE_PER_LOCK_GRANT; 728 gnt = TG3_APE_PER_LOCK_GRANT;
722 729
723 if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
724 bit = APE_LOCK_GRANT_DRIVER;
725 else
726 bit = 1 << tp->pci_fn;
727
728 tg3_ape_write32(tp, gnt + 4 * locknum, bit); 730 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
729} 731}
730 732
@@ -5927,6 +5929,18 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5927 return work_done; 5929 return work_done;
5928} 5930}
5929 5931
5932static inline void tg3_reset_task_schedule(struct tg3 *tp)
5933{
5934 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5935 schedule_work(&tp->reset_task);
5936}
5937
5938static inline void tg3_reset_task_cancel(struct tg3 *tp)
5939{
5940 cancel_work_sync(&tp->reset_task);
5941 tg3_flag_clear(tp, RESET_TASK_PENDING);
5942}
5943
5930static int tg3_poll_msix(struct napi_struct *napi, int budget) 5944static int tg3_poll_msix(struct napi_struct *napi, int budget)
5931{ 5945{
5932 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 5946 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
@@ -5967,7 +5981,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
5967tx_recovery: 5981tx_recovery:
5968 /* work_done is guaranteed to be less than budget. */ 5982 /* work_done is guaranteed to be less than budget. */
5969 napi_complete(napi); 5983 napi_complete(napi);
5970 schedule_work(&tp->reset_task); 5984 tg3_reset_task_schedule(tp);
5971 return work_done; 5985 return work_done;
5972} 5986}
5973 5987
@@ -6002,7 +6016,7 @@ static void tg3_process_error(struct tg3 *tp)
6002 tg3_dump_state(tp); 6016 tg3_dump_state(tp);
6003 6017
6004 tg3_flag_set(tp, ERROR_PROCESSED); 6018 tg3_flag_set(tp, ERROR_PROCESSED);
6005 schedule_work(&tp->reset_task); 6019 tg3_reset_task_schedule(tp);
6006} 6020}
6007 6021
6008static int tg3_poll(struct napi_struct *napi, int budget) 6022static int tg3_poll(struct napi_struct *napi, int budget)
@@ -6049,7 +6063,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
6049tx_recovery: 6063tx_recovery:
6050 /* work_done is guaranteed to be less than budget. */ 6064 /* work_done is guaranteed to be less than budget. */
6051 napi_complete(napi); 6065 napi_complete(napi);
6052 schedule_work(&tp->reset_task); 6066 tg3_reset_task_schedule(tp);
6053 return work_done; 6067 return work_done;
6054} 6068}
6055 6069
@@ -6338,11 +6352,11 @@ static void tg3_reset_task(struct work_struct *work)
6338{ 6352{
6339 struct tg3 *tp = container_of(work, struct tg3, reset_task); 6353 struct tg3 *tp = container_of(work, struct tg3, reset_task);
6340 int err; 6354 int err;
6341 unsigned int restart_timer;
6342 6355
6343 tg3_full_lock(tp, 0); 6356 tg3_full_lock(tp, 0);
6344 6357
6345 if (!netif_running(tp->dev)) { 6358 if (!netif_running(tp->dev)) {
6359 tg3_flag_clear(tp, RESET_TASK_PENDING);
6346 tg3_full_unlock(tp); 6360 tg3_full_unlock(tp);
6347 return; 6361 return;
6348 } 6362 }
@@ -6355,9 +6369,6 @@ static void tg3_reset_task(struct work_struct *work)
6355 6369
6356 tg3_full_lock(tp, 1); 6370 tg3_full_lock(tp, 1);
6357 6371
6358 restart_timer = tg3_flag(tp, RESTART_TIMER);
6359 tg3_flag_clear(tp, RESTART_TIMER);
6360
6361 if (tg3_flag(tp, TX_RECOVERY_PENDING)) { 6372 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6362 tp->write32_tx_mbox = tg3_write32_tx_mbox; 6373 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6363 tp->write32_rx_mbox = tg3_write_flush_reg32; 6374 tp->write32_rx_mbox = tg3_write_flush_reg32;
@@ -6372,14 +6383,13 @@ static void tg3_reset_task(struct work_struct *work)
6372 6383
6373 tg3_netif_start(tp); 6384 tg3_netif_start(tp);
6374 6385
6375 if (restart_timer)
6376 mod_timer(&tp->timer, jiffies + 1);
6377
6378out: 6386out:
6379 tg3_full_unlock(tp); 6387 tg3_full_unlock(tp);
6380 6388
6381 if (!err) 6389 if (!err)
6382 tg3_phy_start(tp); 6390 tg3_phy_start(tp);
6391
6392 tg3_flag_clear(tp, RESET_TASK_PENDING);
6383} 6393}
6384 6394
6385static void tg3_tx_timeout(struct net_device *dev) 6395static void tg3_tx_timeout(struct net_device *dev)
@@ -6391,7 +6401,7 @@ static void tg3_tx_timeout(struct net_device *dev)
6391 tg3_dump_state(tp); 6401 tg3_dump_state(tp);
6392 } 6402 }
6393 6403
6394 schedule_work(&tp->reset_task); 6404 tg3_reset_task_schedule(tp);
6395} 6405}
6396 6406
6397/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ 6407/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
@@ -6442,31 +6452,26 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6442 hwbug = 1; 6452 hwbug = 1;
6443 6453
6444 if (tg3_flag(tp, 4K_FIFO_LIMIT)) { 6454 if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6455 u32 prvidx = *entry;
6445 u32 tmp_flag = flags & ~TXD_FLAG_END; 6456 u32 tmp_flag = flags & ~TXD_FLAG_END;
6446 while (len > TG3_TX_BD_DMA_MAX) { 6457 while (len > TG3_TX_BD_DMA_MAX && *budget) {
6447 u32 frag_len = TG3_TX_BD_DMA_MAX; 6458 u32 frag_len = TG3_TX_BD_DMA_MAX;
6448 len -= TG3_TX_BD_DMA_MAX; 6459 len -= TG3_TX_BD_DMA_MAX;
6449 6460
6450 if (len) { 6461 /* Avoid the 8byte DMA problem */
6451 tnapi->tx_buffers[*entry].fragmented = true; 6462 if (len <= 8) {
6452 /* Avoid the 8byte DMA problem */ 6463 len += TG3_TX_BD_DMA_MAX / 2;
6453 if (len <= 8) { 6464 frag_len = TG3_TX_BD_DMA_MAX / 2;
6454 len += TG3_TX_BD_DMA_MAX / 2;
6455 frag_len = TG3_TX_BD_DMA_MAX / 2;
6456 }
6457 } else
6458 tmp_flag = flags;
6459
6460 if (*budget) {
6461 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6462 frag_len, tmp_flag, mss, vlan);
6463 (*budget)--;
6464 *entry = NEXT_TX(*entry);
6465 } else {
6466 hwbug = 1;
6467 break;
6468 } 6465 }
6469 6466
6467 tnapi->tx_buffers[*entry].fragmented = true;
6468
6469 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6470 frag_len, tmp_flag, mss, vlan);
6471 *budget -= 1;
6472 prvidx = *entry;
6473 *entry = NEXT_TX(*entry);
6474
6470 map += frag_len; 6475 map += frag_len;
6471 } 6476 }
6472 6477
@@ -6474,10 +6479,11 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6474 if (*budget) { 6479 if (*budget) {
6475 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 6480 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6476 len, flags, mss, vlan); 6481 len, flags, mss, vlan);
6477 (*budget)--; 6482 *budget -= 1;
6478 *entry = NEXT_TX(*entry); 6483 *entry = NEXT_TX(*entry);
6479 } else { 6484 } else {
6480 hwbug = 1; 6485 hwbug = 1;
6486 tnapi->tx_buffers[prvidx].fragmented = false;
6481 } 6487 }
6482 } 6488 }
6483 } else { 6489 } else {
@@ -6509,7 +6515,7 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6509 txb = &tnapi->tx_buffers[entry]; 6515 txb = &tnapi->tx_buffers[entry];
6510 } 6516 }
6511 6517
6512 for (i = 0; i < last; i++) { 6518 for (i = 0; i <= last; i++) {
6513 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6519 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6514 6520
6515 entry = NEXT_TX(entry); 6521 entry = NEXT_TX(entry);
@@ -6559,6 +6565,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6559 dev_kfree_skb(new_skb); 6565 dev_kfree_skb(new_skb);
6560 ret = -1; 6566 ret = -1;
6561 } else { 6567 } else {
6568 u32 save_entry = *entry;
6569
6562 base_flags |= TXD_FLAG_END; 6570 base_flags |= TXD_FLAG_END;
6563 6571
6564 tnapi->tx_buffers[*entry].skb = new_skb; 6572 tnapi->tx_buffers[*entry].skb = new_skb;
@@ -6568,7 +6576,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6568 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, 6576 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6569 new_skb->len, base_flags, 6577 new_skb->len, base_flags,
6570 mss, vlan)) { 6578 mss, vlan)) {
6571 tg3_tx_skb_unmap(tnapi, *entry, 0); 6579 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6572 dev_kfree_skb(new_skb); 6580 dev_kfree_skb(new_skb);
6573 ret = -1; 6581 ret = -1;
6574 } 6582 }
@@ -6758,11 +6766,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6758 6766
6759 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | 6767 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6760 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), 6768 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6761 mss, vlan)) 6769 mss, vlan)) {
6762 would_hit_hwbug = 1; 6770 would_hit_hwbug = 1;
6763
6764 /* Now loop through additional data fragments, and queue them. */ 6771 /* Now loop through additional data fragments, and queue them. */
6765 if (skb_shinfo(skb)->nr_frags > 0) { 6772 } else if (skb_shinfo(skb)->nr_frags > 0) {
6766 u32 tmp_mss = mss; 6773 u32 tmp_mss = mss;
6767 6774
6768 if (!tg3_flag(tp, HW_TSO_1) && 6775 if (!tg3_flag(tp, HW_TSO_1) &&
@@ -6784,11 +6791,14 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6784 if (dma_mapping_error(&tp->pdev->dev, mapping)) 6791 if (dma_mapping_error(&tp->pdev->dev, mapping))
6785 goto dma_error; 6792 goto dma_error;
6786 6793
6787 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, 6794 if (!budget ||
6795 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6788 len, base_flags | 6796 len, base_flags |
6789 ((i == last) ? TXD_FLAG_END : 0), 6797 ((i == last) ? TXD_FLAG_END : 0),
6790 tmp_mss, vlan)) 6798 tmp_mss, vlan)) {
6791 would_hit_hwbug = 1; 6799 would_hit_hwbug = 1;
6800 break;
6801 }
6792 } 6802 }
6793 } 6803 }
6794 6804
@@ -6828,7 +6838,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6828 return NETDEV_TX_OK; 6838 return NETDEV_TX_OK;
6829 6839
6830dma_error: 6840dma_error:
6831 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 6841 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6832 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; 6842 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6833drop: 6843drop:
6834 dev_kfree_skb(skb); 6844 dev_kfree_skb(skb);
@@ -7281,7 +7291,8 @@ static void tg3_free_rings(struct tg3 *tp)
7281 if (!skb) 7291 if (!skb)
7282 continue; 7292 continue;
7283 7293
7284 tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags); 7294 tg3_tx_skb_unmap(tnapi, i,
7295 skb_shinfo(skb)->nr_frags - 1);
7285 7296
7286 dev_kfree_skb_any(skb); 7297 dev_kfree_skb_any(skb);
7287 } 7298 }
@@ -9200,7 +9211,7 @@ static void tg3_timer(unsigned long __opaque)
9200{ 9211{
9201 struct tg3 *tp = (struct tg3 *) __opaque; 9212 struct tg3 *tp = (struct tg3 *) __opaque;
9202 9213
9203 if (tp->irq_sync) 9214 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9204 goto restart_timer; 9215 goto restart_timer;
9205 9216
9206 spin_lock(&tp->lock); 9217 spin_lock(&tp->lock);
@@ -9223,10 +9234,9 @@ static void tg3_timer(unsigned long __opaque)
9223 } 9234 }
9224 9235
9225 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 9236 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9226 tg3_flag_set(tp, RESTART_TIMER);
9227 spin_unlock(&tp->lock); 9237 spin_unlock(&tp->lock);
9228 schedule_work(&tp->reset_task); 9238 tg3_reset_task_schedule(tp);
9229 return; 9239 goto restart_timer;
9230 } 9240 }
9231 } 9241 }
9232 9242
@@ -9674,15 +9684,14 @@ static int tg3_open(struct net_device *dev)
9674 struct tg3_napi *tnapi = &tp->napi[i]; 9684 struct tg3_napi *tnapi = &tp->napi[i];
9675 err = tg3_request_irq(tp, i); 9685 err = tg3_request_irq(tp, i);
9676 if (err) { 9686 if (err) {
9677 for (i--; i >= 0; i--) 9687 for (i--; i >= 0; i--) {
9688 tnapi = &tp->napi[i];
9678 free_irq(tnapi->irq_vec, tnapi); 9689 free_irq(tnapi->irq_vec, tnapi);
9679 break; 9690 }
9691 goto err_out2;
9680 } 9692 }
9681 } 9693 }
9682 9694
9683 if (err)
9684 goto err_out2;
9685
9686 tg3_full_lock(tp, 0); 9695 tg3_full_lock(tp, 0);
9687 9696
9688 err = tg3_init_hw(tp, 1); 9697 err = tg3_init_hw(tp, 1);
@@ -9783,7 +9792,7 @@ static int tg3_close(struct net_device *dev)
9783 struct tg3 *tp = netdev_priv(dev); 9792 struct tg3 *tp = netdev_priv(dev);
9784 9793
9785 tg3_napi_disable(tp); 9794 tg3_napi_disable(tp);
9786 cancel_work_sync(&tp->reset_task); 9795 tg3_reset_task_cancel(tp);
9787 9796
9788 netif_tx_stop_all_queues(dev); 9797 netif_tx_stop_all_queues(dev);
9789 9798
@@ -11520,7 +11529,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11520 break; 11529 break;
11521 } 11530 }
11522 11531
11523 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0); 11532 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11524 dev_kfree_skb(skb); 11533 dev_kfree_skb(skb);
11525 11534
11526 if (tx_idx != tnapi->tx_prod) 11535 if (tx_idx != tnapi->tx_prod)
@@ -14228,12 +14237,30 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
14228 val = tr32(MEMARB_MODE); 14237 val = tr32(MEMARB_MODE);
14229 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 14238 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14230 14239
14231 if (tg3_flag(tp, PCIX_MODE)) { 14240 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14232 pci_read_config_dword(tp->pdev, 14241 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14233 tp->pcix_cap + PCI_X_STATUS, &val); 14242 tg3_flag(tp, 5780_CLASS)) {
14234 tp->pci_fn = val & 0x7; 14243 if (tg3_flag(tp, PCIX_MODE)) {
14235 } else { 14244 pci_read_config_dword(tp->pdev,
14236 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; 14245 tp->pcix_cap + PCI_X_STATUS,
14246 &val);
14247 tp->pci_fn = val & 0x7;
14248 }
14249 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14250 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14251 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14252 NIC_SRAM_CPMUSTAT_SIG) {
14253 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14254 tp->pci_fn = tp->pci_fn ? 1 : 0;
14255 }
14256 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14257 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14258 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14259 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14260 NIC_SRAM_CPMUSTAT_SIG) {
14261 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14262 TG3_CPMU_STATUS_FSHFT_5719;
14263 }
14237 } 14264 }
14238 14265
14239 /* Get eeprom hw config before calling tg3_set_power_state(). 14266 /* Get eeprom hw config before calling tg3_set_power_state().
@@ -15665,7 +15692,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
15665 if (tp->fw) 15692 if (tp->fw)
15666 release_firmware(tp->fw); 15693 release_firmware(tp->fw);
15667 15694
15668 cancel_work_sync(&tp->reset_task); 15695 tg3_reset_task_cancel(tp);
15669 15696
15670 if (tg3_flag(tp, USE_PHYLIB)) { 15697 if (tg3_flag(tp, USE_PHYLIB)) {
15671 tg3_phy_fini(tp); 15698 tg3_phy_fini(tp);
@@ -15699,7 +15726,7 @@ static int tg3_suspend(struct device *device)
15699 if (!netif_running(dev)) 15726 if (!netif_running(dev))
15700 return 0; 15727 return 0;
15701 15728
15702 flush_work_sync(&tp->reset_task); 15729 tg3_reset_task_cancel(tp);
15703 tg3_phy_stop(tp); 15730 tg3_phy_stop(tp);
15704 tg3_netif_stop(tp); 15731 tg3_netif_stop(tp);
15705 15732
@@ -15812,12 +15839,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15812 tg3_netif_stop(tp); 15839 tg3_netif_stop(tp);
15813 15840
15814 del_timer_sync(&tp->timer); 15841 del_timer_sync(&tp->timer);
15815 tg3_flag_clear(tp, RESTART_TIMER);
15816 15842
15817 /* Want to make sure that the reset task doesn't run */ 15843 /* Want to make sure that the reset task doesn't run */
15818 cancel_work_sync(&tp->reset_task); 15844 tg3_reset_task_cancel(tp);
15819 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 15845 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15820 tg3_flag_clear(tp, RESTART_TIMER);
15821 15846
15822 netif_device_detach(netdev); 15847 netif_device_detach(netdev);
15823 15848
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index f32f288134c7..94b4bd049a33 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1095,6 +1095,11 @@
1095#define TG3_CPMU_CLCK_ORIDE 0x00003624 1095#define TG3_CPMU_CLCK_ORIDE 0x00003624
1096#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000 1096#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000
1097 1097
1098#define TG3_CPMU_STATUS 0x0000362c
1099#define TG3_CPMU_STATUS_FMSK_5717 0x20000000
1100#define TG3_CPMU_STATUS_FMSK_5719 0xc0000000
1101#define TG3_CPMU_STATUS_FSHFT_5719 30
1102
1098#define TG3_CPMU_CLCK_STAT 0x00003630 1103#define TG3_CPMU_CLCK_STAT 0x00003630
1099#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 1104#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
1100#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 1105#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000
@@ -2128,6 +2133,10 @@
2128#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008 2133#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008
2129#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010 2134#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010
2130 2135
2136#define NIC_SRAM_CPMU_STATUS 0x00000e00
2137#define NIC_SRAM_CPMUSTAT_SIG 0x0000362c
2138#define NIC_SRAM_CPMUSTAT_SIG_MSK 0x0000ffff
2139
2131#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000 2140#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000
2132 2141
2133#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000 2142#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000
@@ -2344,9 +2353,13 @@
2344#define APE_PER_LOCK_GRANT_DRIVER 0x00001000 2353#define APE_PER_LOCK_GRANT_DRIVER 0x00001000
2345 2354
2346/* APE convenience enumerations. */ 2355/* APE convenience enumerations. */
2347#define TG3_APE_LOCK_GRC 1 2356#define TG3_APE_LOCK_PHY0 0
2348#define TG3_APE_LOCK_MEM 4 2357#define TG3_APE_LOCK_GRC 1
2349#define TG3_APE_LOCK_GPIO 7 2358#define TG3_APE_LOCK_PHY1 2
2359#define TG3_APE_LOCK_PHY2 3
2360#define TG3_APE_LOCK_MEM 4
2361#define TG3_APE_LOCK_PHY3 5
2362#define TG3_APE_LOCK_GPIO 7
2350 2363
2351#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 2364#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
2352 2365
@@ -2866,7 +2879,6 @@ enum TG3_FLAGS {
2866 TG3_FLAG_JUMBO_CAPABLE, 2879 TG3_FLAG_JUMBO_CAPABLE,
2867 TG3_FLAG_CHIP_RESETTING, 2880 TG3_FLAG_CHIP_RESETTING,
2868 TG3_FLAG_INIT_COMPLETE, 2881 TG3_FLAG_INIT_COMPLETE,
2869 TG3_FLAG_RESTART_TIMER,
2870 TG3_FLAG_TSO_BUG, 2882 TG3_FLAG_TSO_BUG,
2871 TG3_FLAG_IS_5788, 2883 TG3_FLAG_IS_5788,
2872 TG3_FLAG_MAX_RXPEND_64, 2884 TG3_FLAG_MAX_RXPEND_64,
@@ -2909,6 +2921,7 @@ enum TG3_FLAGS {
2909 TG3_FLAG_APE_HAS_NCSI, 2921 TG3_FLAG_APE_HAS_NCSI,
2910 TG3_FLAG_5717_PLUS, 2922 TG3_FLAG_5717_PLUS,
2911 TG3_FLAG_4K_FIFO_LIMIT, 2923 TG3_FLAG_4K_FIFO_LIMIT,
2924 TG3_FLAG_RESET_TASK_PENDING,
2912 2925
2913 /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */ 2926 /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
2914 TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ 2927 TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 98849a1fc749..a2e150059bc7 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -5,8 +5,8 @@
5config HAVE_NET_MACB 5config HAVE_NET_MACB
6 bool 6 bool
7 7
8config NET_ATMEL 8config NET_CADENCE
9 bool "Atmel devices" 9 bool "Cadence devices"
10 depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200) 10 depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
11 ---help--- 11 ---help---
12 If you have a network (Ethernet) card belonging to this class, say Y. 12 If you have a network (Ethernet) card belonging to this class, say Y.
@@ -20,7 +20,7 @@ config NET_ATMEL
20 the remaining Atmel network card questions. If you say Y, you will be 20 the remaining Atmel network card questions. If you say Y, you will be
21 asked for your specific card in the following questions. 21 asked for your specific card in the following questions.
22 22
23if NET_ATMEL 23if NET_CADENCE
24 24
25config ARM_AT91_ETHER 25config ARM_AT91_ETHER
26 tristate "AT91RM9200 Ethernet support" 26 tristate "AT91RM9200 Ethernet support"
@@ -32,14 +32,16 @@ config ARM_AT91_ETHER
32 ethernet support, then you should always answer Y to this. 32 ethernet support, then you should always answer Y to this.
33 33
34config MACB 34config MACB
35 tristate "Atmel MACB support" 35 tristate "Cadence MACB/GEM support"
36 depends on HAVE_NET_MACB 36 depends on HAVE_NET_MACB
37 select PHYLIB 37 select PHYLIB
38 ---help--- 38 ---help---
39 The Atmel MACB ethernet interface is found on many AT32 and AT91 39 The Cadence MACB ethernet interface is found on many Atmel AT32 and
40 parts. Say Y to include support for the MACB chip. 40 AT91 parts. This driver also supports the Cadence GEM (Gigabit
41 Ethernet MAC found in some ARM SoC devices). Note: the Gigabit mode
42 is not yet supported. Say Y to include support for the MACB/GEM chip.
41 43
42 To compile this driver as a module, choose M here: the module 44 To compile this driver as a module, choose M here: the module
43 will be called macb. 45 will be called macb.
44 46
45endif # NET_ATMEL 47endif # NET_CADENCE
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 56624d303487..dfeb46cb3f74 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -26,6 +26,7 @@
26#include <linux/skbuff.h> 26#include <linux/skbuff.h>
27#include <linux/dma-mapping.h> 27#include <linux/dma-mapping.h>
28#include <linux/ethtool.h> 28#include <linux/ethtool.h>
29#include <linux/platform_data/macb.h>
29#include <linux/platform_device.h> 30#include <linux/platform_device.h>
30#include <linux/clk.h> 31#include <linux/clk.h>
31#include <linux/gfp.h> 32#include <linux/gfp.h>
@@ -984,7 +985,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
984static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address, 985static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address,
985 struct platform_device *pdev, struct clk *ether_clk) 986 struct platform_device *pdev, struct clk *ether_clk)
986{ 987{
987 struct at91_eth_data *board_data = pdev->dev.platform_data; 988 struct macb_platform_data *board_data = pdev->dev.platform_data;
988 struct net_device *dev; 989 struct net_device *dev;
989 struct at91_private *lp; 990 struct at91_private *lp;
990 unsigned int val; 991 unsigned int val;
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
index 353f4dab62be..3725fbb0defe 100644
--- a/drivers/net/ethernet/cadence/at91_ether.h
+++ b/drivers/net/ethernet/cadence/at91_ether.h
@@ -85,7 +85,9 @@ struct recv_desc_bufs
85struct at91_private 85struct at91_private
86{ 86{
87 struct mii_if_info mii; /* ethtool support */ 87 struct mii_if_info mii; /* ethtool support */
88 struct at91_eth_data board_data; /* board-specific configuration */ 88 struct macb_platform_data board_data; /* board-specific
89 * configuration (shared with
90 * macb for common data */
89 struct clk *ether_clk; /* clock */ 91 struct clk *ether_clk; /* clock */
90 92
91 /* PHY */ 93 /* PHY */
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index a437b46e5490..64d61461bdc7 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Atmel MACB Ethernet Controller driver 2 * Cadence MACB/GEM Ethernet Controller driver
3 * 3 *
4 * Copyright (C) 2004-2006 Atmel Corporation 4 * Copyright (C) 2004-2006 Atmel Corporation
5 * 5 *
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11#include <linux/clk.h> 12#include <linux/clk.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
@@ -19,12 +20,10 @@
19#include <linux/netdevice.h> 20#include <linux/netdevice.h>
20#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
21#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <linux/platform_data/macb.h>
22#include <linux/platform_device.h> 24#include <linux/platform_device.h>
23#include <linux/phy.h> 25#include <linux/phy.h>
24 26
25#include <mach/board.h>
26#include <mach/cpu.h>
27
28#include "macb.h" 27#include "macb.h"
29 28
30#define RX_BUFFER_SIZE 128 29#define RX_BUFFER_SIZE 128
@@ -60,9 +59,9 @@ static void __macb_set_hwaddr(struct macb *bp)
60 u16 top; 59 u16 top;
61 60
62 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); 61 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
63 macb_writel(bp, SA1B, bottom); 62 macb_or_gem_writel(bp, SA1B, bottom);
64 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); 63 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
65 macb_writel(bp, SA1T, top); 64 macb_or_gem_writel(bp, SA1T, top);
66} 65}
67 66
68static void __init macb_get_hwaddr(struct macb *bp) 67static void __init macb_get_hwaddr(struct macb *bp)
@@ -71,8 +70,8 @@ static void __init macb_get_hwaddr(struct macb *bp)
71 u16 top; 70 u16 top;
72 u8 addr[6]; 71 u8 addr[6];
73 72
74 bottom = macb_readl(bp, SA1B); 73 bottom = macb_or_gem_readl(bp, SA1B);
75 top = macb_readl(bp, SA1T); 74 top = macb_or_gem_readl(bp, SA1T);
76 75
77 addr[0] = bottom & 0xff; 76 addr[0] = bottom & 0xff;
78 addr[1] = (bottom >> 8) & 0xff; 77 addr[1] = (bottom >> 8) & 0xff;
@@ -84,7 +83,7 @@ static void __init macb_get_hwaddr(struct macb *bp)
84 if (is_valid_ether_addr(addr)) { 83 if (is_valid_ether_addr(addr)) {
85 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 84 memcpy(bp->dev->dev_addr, addr, sizeof(addr));
86 } else { 85 } else {
87 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); 86 netdev_info(bp->dev, "invalid hw address, using random\n");
88 random_ether_addr(bp->dev->dev_addr); 87 random_ether_addr(bp->dev->dev_addr);
89 } 88 }
90} 89}
@@ -178,11 +177,12 @@ static void macb_handle_link_change(struct net_device *dev)
178 177
179 if (status_change) { 178 if (status_change) {
180 if (phydev->link) 179 if (phydev->link)
181 printk(KERN_INFO "%s: link up (%d/%s)\n", 180 netdev_info(dev, "link up (%d/%s)\n",
182 dev->name, phydev->speed, 181 phydev->speed,
183 DUPLEX_FULL == phydev->duplex ? "Full":"Half"); 182 phydev->duplex == DUPLEX_FULL ?
183 "Full" : "Half");
184 else 184 else
185 printk(KERN_INFO "%s: link down\n", dev->name); 185 netdev_info(dev, "link down\n");
186 } 186 }
187} 187}
188 188
@@ -191,12 +191,12 @@ static int macb_mii_probe(struct net_device *dev)
191{ 191{
192 struct macb *bp = netdev_priv(dev); 192 struct macb *bp = netdev_priv(dev);
193 struct phy_device *phydev; 193 struct phy_device *phydev;
194 struct eth_platform_data *pdata; 194 struct macb_platform_data *pdata;
195 int ret; 195 int ret;
196 196
197 phydev = phy_find_first(bp->mii_bus); 197 phydev = phy_find_first(bp->mii_bus);
198 if (!phydev) { 198 if (!phydev) {
199 printk (KERN_ERR "%s: no PHY found\n", dev->name); 199 netdev_err(dev, "no PHY found\n");
200 return -1; 200 return -1;
201 } 201 }
202 202
@@ -209,7 +209,7 @@ static int macb_mii_probe(struct net_device *dev)
209 PHY_INTERFACE_MODE_RMII : 209 PHY_INTERFACE_MODE_RMII :
210 PHY_INTERFACE_MODE_MII); 210 PHY_INTERFACE_MODE_MII);
211 if (ret) { 211 if (ret) {
212 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 212 netdev_err(dev, "Could not attach to PHY\n");
213 return ret; 213 return ret;
214 } 214 }
215 215
@@ -228,7 +228,7 @@ static int macb_mii_probe(struct net_device *dev)
228 228
229static int macb_mii_init(struct macb *bp) 229static int macb_mii_init(struct macb *bp)
230{ 230{
231 struct eth_platform_data *pdata; 231 struct macb_platform_data *pdata;
232 int err = -ENXIO, i; 232 int err = -ENXIO, i;
233 233
234 /* Enable management port */ 234 /* Enable management port */
@@ -285,8 +285,8 @@ err_out:
285static void macb_update_stats(struct macb *bp) 285static void macb_update_stats(struct macb *bp)
286{ 286{
287 u32 __iomem *reg = bp->regs + MACB_PFR; 287 u32 __iomem *reg = bp->regs + MACB_PFR;
288 u32 *p = &bp->hw_stats.rx_pause_frames; 288 u32 *p = &bp->hw_stats.macb.rx_pause_frames;
289 u32 *end = &bp->hw_stats.tx_pause_frames + 1; 289 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
290 290
291 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 291 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
292 292
@@ -303,14 +303,13 @@ static void macb_tx(struct macb *bp)
303 status = macb_readl(bp, TSR); 303 status = macb_readl(bp, TSR);
304 macb_writel(bp, TSR, status); 304 macb_writel(bp, TSR, status);
305 305
306 dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n", 306 netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status);
307 (unsigned long)status);
308 307
309 if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) { 308 if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
310 int i; 309 int i;
311 printk(KERN_ERR "%s: TX %s, resetting buffers\n", 310 netdev_err(bp->dev, "TX %s, resetting buffers\n",
312 bp->dev->name, status & MACB_BIT(UND) ? 311 status & MACB_BIT(UND) ?
313 "underrun" : "retry limit exceeded"); 312 "underrun" : "retry limit exceeded");
314 313
315 /* Transfer ongoing, disable transmitter, to avoid confusion */ 314 /* Transfer ongoing, disable transmitter, to avoid confusion */
316 if (status & MACB_BIT(TGO)) 315 if (status & MACB_BIT(TGO))
@@ -369,8 +368,8 @@ static void macb_tx(struct macb *bp)
369 if (!(bufstat & MACB_BIT(TX_USED))) 368 if (!(bufstat & MACB_BIT(TX_USED)))
370 break; 369 break;
371 370
372 dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n", 371 netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n",
373 tail, skb->data); 372 tail, skb->data);
374 dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, 373 dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
375 DMA_TO_DEVICE); 374 DMA_TO_DEVICE);
376 bp->stats.tx_packets++; 375 bp->stats.tx_packets++;
@@ -395,8 +394,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
395 394
396 len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl); 395 len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
397 396
398 dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n", 397 netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
399 first_frag, last_frag, len); 398 first_frag, last_frag, len);
400 399
401 skb = dev_alloc_skb(len + RX_OFFSET); 400 skb = dev_alloc_skb(len + RX_OFFSET);
402 if (!skb) { 401 if (!skb) {
@@ -437,8 +436,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
437 436
438 bp->stats.rx_packets++; 437 bp->stats.rx_packets++;
439 bp->stats.rx_bytes += len; 438 bp->stats.rx_bytes += len;
440 dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n", 439 netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n",
441 skb->len, skb->csum); 440 skb->len, skb->csum);
442 netif_receive_skb(skb); 441 netif_receive_skb(skb);
443 442
444 return 0; 443 return 0;
@@ -515,8 +514,8 @@ static int macb_poll(struct napi_struct *napi, int budget)
515 514
516 work_done = 0; 515 work_done = 0;
517 516
518 dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n", 517 netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n",
519 (unsigned long)status, budget); 518 (unsigned long)status, budget);
520 519
521 work_done = macb_rx(bp, budget); 520 work_done = macb_rx(bp, budget);
522 if (work_done < budget) { 521 if (work_done < budget) {
@@ -565,8 +564,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
565 macb_writel(bp, IDR, MACB_RX_INT_FLAGS); 564 macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
566 565
567 if (napi_schedule_prep(&bp->napi)) { 566 if (napi_schedule_prep(&bp->napi)) {
568 dev_dbg(&bp->pdev->dev, 567 netdev_dbg(bp->dev, "scheduling RX softirq\n");
569 "scheduling RX softirq\n");
570 __napi_schedule(&bp->napi); 568 __napi_schedule(&bp->napi);
571 } 569 }
572 } 570 }
@@ -582,16 +580,19 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
582 580
583 if (status & MACB_BIT(ISR_ROVR)) { 581 if (status & MACB_BIT(ISR_ROVR)) {
584 /* We missed at least one packet */ 582 /* We missed at least one packet */
585 bp->hw_stats.rx_overruns++; 583 if (macb_is_gem(bp))
584 bp->hw_stats.gem.rx_overruns++;
585 else
586 bp->hw_stats.macb.rx_overruns++;
586 } 587 }
587 588
588 if (status & MACB_BIT(HRESP)) { 589 if (status & MACB_BIT(HRESP)) {
589 /* 590 /*
590 * TODO: Reset the hardware, and maybe move the printk 591 * TODO: Reset the hardware, and maybe move the
591 * to a lower-priority context as well (work queue?) 592 * netdev_err to a lower-priority context as well
593 * (work queue?)
592 */ 594 */
593 printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n", 595 netdev_err(dev, "DMA bus error: HRESP not OK\n");
594 dev->name);
595 } 596 }
596 597
597 status = macb_readl(bp, ISR); 598 status = macb_readl(bp, ISR);
@@ -626,16 +627,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
626 unsigned long flags; 627 unsigned long flags;
627 628
628#ifdef DEBUG 629#ifdef DEBUG
629 int i; 630 netdev_dbg(bp->dev,
630 dev_dbg(&bp->pdev->dev, 631 "start_xmit: len %u head %p data %p tail %p end %p\n",
631 "start_xmit: len %u head %p data %p tail %p end %p\n", 632 skb->len, skb->head, skb->data,
632 skb->len, skb->head, skb->data, 633 skb_tail_pointer(skb), skb_end_pointer(skb));
633 skb_tail_pointer(skb), skb_end_pointer(skb)); 634 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
634 dev_dbg(&bp->pdev->dev, 635 skb->data, 16, true);
635 "data:");
636 for (i = 0; i < 16; i++)
637 printk(" %02x", (unsigned int)skb->data[i]);
638 printk("\n");
639#endif 636#endif
640 637
641 len = skb->len; 638 len = skb->len;
@@ -645,21 +642,20 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
645 if (TX_BUFFS_AVAIL(bp) < 1) { 642 if (TX_BUFFS_AVAIL(bp) < 1) {
646 netif_stop_queue(dev); 643 netif_stop_queue(dev);
647 spin_unlock_irqrestore(&bp->lock, flags); 644 spin_unlock_irqrestore(&bp->lock, flags);
648 dev_err(&bp->pdev->dev, 645 netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
649 "BUG! Tx Ring full when queue awake!\n"); 646 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
650 dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n", 647 bp->tx_head, bp->tx_tail);
651 bp->tx_head, bp->tx_tail);
652 return NETDEV_TX_BUSY; 648 return NETDEV_TX_BUSY;
653 } 649 }
654 650
655 entry = bp->tx_head; 651 entry = bp->tx_head;
656 dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry); 652 netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry);
657 mapping = dma_map_single(&bp->pdev->dev, skb->data, 653 mapping = dma_map_single(&bp->pdev->dev, skb->data,
658 len, DMA_TO_DEVICE); 654 len, DMA_TO_DEVICE);
659 bp->tx_skb[entry].skb = skb; 655 bp->tx_skb[entry].skb = skb;
660 bp->tx_skb[entry].mapping = mapping; 656 bp->tx_skb[entry].mapping = mapping;
661 dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n", 657 netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
662 skb->data, (unsigned long)mapping); 658 skb->data, (unsigned long)mapping);
663 659
664 ctrl = MACB_BF(TX_FRMLEN, len); 660 ctrl = MACB_BF(TX_FRMLEN, len);
665 ctrl |= MACB_BIT(TX_LAST); 661 ctrl |= MACB_BIT(TX_LAST);
@@ -723,27 +719,27 @@ static int macb_alloc_consistent(struct macb *bp)
723 &bp->rx_ring_dma, GFP_KERNEL); 719 &bp->rx_ring_dma, GFP_KERNEL);
724 if (!bp->rx_ring) 720 if (!bp->rx_ring)
725 goto out_err; 721 goto out_err;
726 dev_dbg(&bp->pdev->dev, 722 netdev_dbg(bp->dev,
727 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", 723 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
728 size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); 724 size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
729 725
730 size = TX_RING_BYTES; 726 size = TX_RING_BYTES;
731 bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 727 bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
732 &bp->tx_ring_dma, GFP_KERNEL); 728 &bp->tx_ring_dma, GFP_KERNEL);
733 if (!bp->tx_ring) 729 if (!bp->tx_ring)
734 goto out_err; 730 goto out_err;
735 dev_dbg(&bp->pdev->dev, 731 netdev_dbg(bp->dev,
736 "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", 732 "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
737 size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); 733 size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
738 734
739 size = RX_RING_SIZE * RX_BUFFER_SIZE; 735 size = RX_RING_SIZE * RX_BUFFER_SIZE;
740 bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, 736 bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
741 &bp->rx_buffers_dma, GFP_KERNEL); 737 &bp->rx_buffers_dma, GFP_KERNEL);
742 if (!bp->rx_buffers) 738 if (!bp->rx_buffers)
743 goto out_err; 739 goto out_err;
744 dev_dbg(&bp->pdev->dev, 740 netdev_dbg(bp->dev,
745 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", 741 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
746 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); 742 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
747 743
748 return 0; 744 return 0;
749 745
@@ -797,6 +793,84 @@ static void macb_reset_hw(struct macb *bp)
797 macb_readl(bp, ISR); 793 macb_readl(bp, ISR);
798} 794}
799 795
796static u32 gem_mdc_clk_div(struct macb *bp)
797{
798 u32 config;
799 unsigned long pclk_hz = clk_get_rate(bp->pclk);
800
801 if (pclk_hz <= 20000000)
802 config = GEM_BF(CLK, GEM_CLK_DIV8);
803 else if (pclk_hz <= 40000000)
804 config = GEM_BF(CLK, GEM_CLK_DIV16);
805 else if (pclk_hz <= 80000000)
806 config = GEM_BF(CLK, GEM_CLK_DIV32);
807 else if (pclk_hz <= 120000000)
808 config = GEM_BF(CLK, GEM_CLK_DIV48);
809 else if (pclk_hz <= 160000000)
810 config = GEM_BF(CLK, GEM_CLK_DIV64);
811 else
812 config = GEM_BF(CLK, GEM_CLK_DIV96);
813
814 return config;
815}
816
817static u32 macb_mdc_clk_div(struct macb *bp)
818{
819 u32 config;
820 unsigned long pclk_hz;
821
822 if (macb_is_gem(bp))
823 return gem_mdc_clk_div(bp);
824
825 pclk_hz = clk_get_rate(bp->pclk);
826 if (pclk_hz <= 20000000)
827 config = MACB_BF(CLK, MACB_CLK_DIV8);
828 else if (pclk_hz <= 40000000)
829 config = MACB_BF(CLK, MACB_CLK_DIV16);
830 else if (pclk_hz <= 80000000)
831 config = MACB_BF(CLK, MACB_CLK_DIV32);
832 else
833 config = MACB_BF(CLK, MACB_CLK_DIV64);
834
835 return config;
836}
837
838/*
839 * Get the DMA bus width field of the network configuration register that we
840 * should program. We find the width from decoding the design configuration
841 * register to find the maximum supported data bus width.
842 */
843static u32 macb_dbw(struct macb *bp)
844{
845 if (!macb_is_gem(bp))
846 return 0;
847
848 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
849 case 4:
850 return GEM_BF(DBW, GEM_DBW128);
851 case 2:
852 return GEM_BF(DBW, GEM_DBW64);
853 case 1:
854 default:
855 return GEM_BF(DBW, GEM_DBW32);
856 }
857}
858
859/*
860 * Configure the receive DMA engine to use the correct receive buffer size.
861 * This is a configurable parameter for GEM.
862 */
863static void macb_configure_dma(struct macb *bp)
864{
865 u32 dmacfg;
866
867 if (macb_is_gem(bp)) {
868 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
869 dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
870 gem_writel(bp, DMACFG, dmacfg);
871 }
872}
873
800static void macb_init_hw(struct macb *bp) 874static void macb_init_hw(struct macb *bp)
801{ 875{
802 u32 config; 876 u32 config;
@@ -804,7 +878,7 @@ static void macb_init_hw(struct macb *bp)
804 macb_reset_hw(bp); 878 macb_reset_hw(bp);
805 __macb_set_hwaddr(bp); 879 __macb_set_hwaddr(bp);
806 880
807 config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L); 881 config = macb_mdc_clk_div(bp);
808 config |= MACB_BIT(PAE); /* PAuse Enable */ 882 config |= MACB_BIT(PAE); /* PAuse Enable */
809 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 883 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
810 config |= MACB_BIT(BIG); /* Receive oversized frames */ 884 config |= MACB_BIT(BIG); /* Receive oversized frames */
@@ -812,8 +886,11 @@ static void macb_init_hw(struct macb *bp)
812 config |= MACB_BIT(CAF); /* Copy All Frames */ 886 config |= MACB_BIT(CAF); /* Copy All Frames */
813 if (!(bp->dev->flags & IFF_BROADCAST)) 887 if (!(bp->dev->flags & IFF_BROADCAST))
814 config |= MACB_BIT(NBC); /* No BroadCast */ 888 config |= MACB_BIT(NBC); /* No BroadCast */
889 config |= macb_dbw(bp);
815 macb_writel(bp, NCFGR, config); 890 macb_writel(bp, NCFGR, config);
816 891
892 macb_configure_dma(bp);
893
817 /* Initialize TX and RX buffers */ 894 /* Initialize TX and RX buffers */
818 macb_writel(bp, RBQP, bp->rx_ring_dma); 895 macb_writel(bp, RBQP, bp->rx_ring_dma);
819 macb_writel(bp, TBQP, bp->tx_ring_dma); 896 macb_writel(bp, TBQP, bp->tx_ring_dma);
@@ -909,8 +986,8 @@ static void macb_sethashtable(struct net_device *dev)
909 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 986 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
910 } 987 }
911 988
912 macb_writel(bp, HRB, mc_filter[0]); 989 macb_or_gem_writel(bp, HRB, mc_filter[0]);
913 macb_writel(bp, HRT, mc_filter[1]); 990 macb_or_gem_writel(bp, HRT, mc_filter[1]);
914} 991}
915 992
916/* 993/*
@@ -932,8 +1009,8 @@ static void macb_set_rx_mode(struct net_device *dev)
932 1009
933 if (dev->flags & IFF_ALLMULTI) { 1010 if (dev->flags & IFF_ALLMULTI) {
934 /* Enable all multicast mode */ 1011 /* Enable all multicast mode */
935 macb_writel(bp, HRB, -1); 1012 macb_or_gem_writel(bp, HRB, -1);
936 macb_writel(bp, HRT, -1); 1013 macb_or_gem_writel(bp, HRT, -1);
937 cfg |= MACB_BIT(NCFGR_MTI); 1014 cfg |= MACB_BIT(NCFGR_MTI);
938 } else if (!netdev_mc_empty(dev)) { 1015 } else if (!netdev_mc_empty(dev)) {
939 /* Enable specific multicasts */ 1016 /* Enable specific multicasts */
@@ -941,8 +1018,8 @@ static void macb_set_rx_mode(struct net_device *dev)
941 cfg |= MACB_BIT(NCFGR_MTI); 1018 cfg |= MACB_BIT(NCFGR_MTI);
942 } else if (dev->flags & (~IFF_ALLMULTI)) { 1019 } else if (dev->flags & (~IFF_ALLMULTI)) {
943 /* Disable all multicast mode */ 1020 /* Disable all multicast mode */
944 macb_writel(bp, HRB, 0); 1021 macb_or_gem_writel(bp, HRB, 0);
945 macb_writel(bp, HRT, 0); 1022 macb_or_gem_writel(bp, HRT, 0);
946 cfg &= ~MACB_BIT(NCFGR_MTI); 1023 cfg &= ~MACB_BIT(NCFGR_MTI);
947 } 1024 }
948 1025
@@ -954,7 +1031,7 @@ static int macb_open(struct net_device *dev)
954 struct macb *bp = netdev_priv(dev); 1031 struct macb *bp = netdev_priv(dev);
955 int err; 1032 int err;
956 1033
957 dev_dbg(&bp->pdev->dev, "open\n"); 1034 netdev_dbg(bp->dev, "open\n");
958 1035
959 /* if the phy is not yet register, retry later*/ 1036 /* if the phy is not yet register, retry later*/
960 if (!bp->phy_dev) 1037 if (!bp->phy_dev)
@@ -965,9 +1042,8 @@ static int macb_open(struct net_device *dev)
965 1042
966 err = macb_alloc_consistent(bp); 1043 err = macb_alloc_consistent(bp);
967 if (err) { 1044 if (err) {
968 printk(KERN_ERR 1045 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
969 "%s: Unable to allocate DMA memory (error %d)\n", 1046 err);
970 dev->name, err);
971 return err; 1047 return err;
972 } 1048 }
973 1049
@@ -1005,11 +1081,62 @@ static int macb_close(struct net_device *dev)
1005 return 0; 1081 return 0;
1006} 1082}
1007 1083
1084static void gem_update_stats(struct macb *bp)
1085{
1086 u32 __iomem *reg = bp->regs + GEM_OTX;
1087 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
1088 u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
1089
1090 for (; p < end; p++, reg++)
1091 *p += __raw_readl(reg);
1092}
1093
1094static struct net_device_stats *gem_get_stats(struct macb *bp)
1095{
1096 struct gem_stats *hwstat = &bp->hw_stats.gem;
1097 struct net_device_stats *nstat = &bp->stats;
1098
1099 gem_update_stats(bp);
1100
1101 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
1102 hwstat->rx_alignment_errors +
1103 hwstat->rx_resource_errors +
1104 hwstat->rx_overruns +
1105 hwstat->rx_oversize_frames +
1106 hwstat->rx_jabbers +
1107 hwstat->rx_undersized_frames +
1108 hwstat->rx_length_field_frame_errors);
1109 nstat->tx_errors = (hwstat->tx_late_collisions +
1110 hwstat->tx_excessive_collisions +
1111 hwstat->tx_underrun +
1112 hwstat->tx_carrier_sense_errors);
1113 nstat->multicast = hwstat->rx_multicast_frames;
1114 nstat->collisions = (hwstat->tx_single_collision_frames +
1115 hwstat->tx_multiple_collision_frames +
1116 hwstat->tx_excessive_collisions);
1117 nstat->rx_length_errors = (hwstat->rx_oversize_frames +
1118 hwstat->rx_jabbers +
1119 hwstat->rx_undersized_frames +
1120 hwstat->rx_length_field_frame_errors);
1121 nstat->rx_over_errors = hwstat->rx_resource_errors;
1122 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
1123 nstat->rx_frame_errors = hwstat->rx_alignment_errors;
1124 nstat->rx_fifo_errors = hwstat->rx_overruns;
1125 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
1126 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
1127 nstat->tx_fifo_errors = hwstat->tx_underrun;
1128
1129 return nstat;
1130}
1131
1008static struct net_device_stats *macb_get_stats(struct net_device *dev) 1132static struct net_device_stats *macb_get_stats(struct net_device *dev)
1009{ 1133{
1010 struct macb *bp = netdev_priv(dev); 1134 struct macb *bp = netdev_priv(dev);
1011 struct net_device_stats *nstat = &bp->stats; 1135 struct net_device_stats *nstat = &bp->stats;
1012 struct macb_stats *hwstat = &bp->hw_stats; 1136 struct macb_stats *hwstat = &bp->hw_stats.macb;
1137
1138 if (macb_is_gem(bp))
1139 return gem_get_stats(bp);
1013 1140
1014 /* read stats from hardware */ 1141 /* read stats from hardware */
1015 macb_update_stats(bp); 1142 macb_update_stats(bp);
@@ -1119,12 +1246,11 @@ static const struct net_device_ops macb_netdev_ops = {
1119 1246
1120static int __init macb_probe(struct platform_device *pdev) 1247static int __init macb_probe(struct platform_device *pdev)
1121{ 1248{
1122 struct eth_platform_data *pdata; 1249 struct macb_platform_data *pdata;
1123 struct resource *regs; 1250 struct resource *regs;
1124 struct net_device *dev; 1251 struct net_device *dev;
1125 struct macb *bp; 1252 struct macb *bp;
1126 struct phy_device *phydev; 1253 struct phy_device *phydev;
1127 unsigned long pclk_hz;
1128 u32 config; 1254 u32 config;
1129 int err = -ENXIO; 1255 int err = -ENXIO;
1130 1256
@@ -1152,28 +1278,19 @@ static int __init macb_probe(struct platform_device *pdev)
1152 1278
1153 spin_lock_init(&bp->lock); 1279 spin_lock_init(&bp->lock);
1154 1280
1155#if defined(CONFIG_ARCH_AT91) 1281 bp->pclk = clk_get(&pdev->dev, "pclk");
1156 bp->pclk = clk_get(&pdev->dev, "macb_clk");
1157 if (IS_ERR(bp->pclk)) { 1282 if (IS_ERR(bp->pclk)) {
1158 dev_err(&pdev->dev, "failed to get macb_clk\n"); 1283 dev_err(&pdev->dev, "failed to get macb_clk\n");
1159 goto err_out_free_dev; 1284 goto err_out_free_dev;
1160 } 1285 }
1161 clk_enable(bp->pclk); 1286 clk_enable(bp->pclk);
1162#else 1287
1163 bp->pclk = clk_get(&pdev->dev, "pclk");
1164 if (IS_ERR(bp->pclk)) {
1165 dev_err(&pdev->dev, "failed to get pclk\n");
1166 goto err_out_free_dev;
1167 }
1168 bp->hclk = clk_get(&pdev->dev, "hclk"); 1288 bp->hclk = clk_get(&pdev->dev, "hclk");
1169 if (IS_ERR(bp->hclk)) { 1289 if (IS_ERR(bp->hclk)) {
1170 dev_err(&pdev->dev, "failed to get hclk\n"); 1290 dev_err(&pdev->dev, "failed to get hclk\n");
1171 goto err_out_put_pclk; 1291 goto err_out_put_pclk;
1172 } 1292 }
1173
1174 clk_enable(bp->pclk);
1175 clk_enable(bp->hclk); 1293 clk_enable(bp->hclk);
1176#endif
1177 1294
1178 bp->regs = ioremap(regs->start, resource_size(regs)); 1295 bp->regs = ioremap(regs->start, resource_size(regs));
1179 if (!bp->regs) { 1296 if (!bp->regs) {
@@ -1185,9 +1302,8 @@ static int __init macb_probe(struct platform_device *pdev)
1185 dev->irq = platform_get_irq(pdev, 0); 1302 dev->irq = platform_get_irq(pdev, 0);
1186 err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev); 1303 err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
1187 if (err) { 1304 if (err) {
1188 printk(KERN_ERR 1305 dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
1189 "%s: Unable to request IRQ %d (error %d)\n", 1306 dev->irq, err);
1190 dev->name, dev->irq, err);
1191 goto err_out_iounmap; 1307 goto err_out_iounmap;
1192 } 1308 }
1193 1309
@@ -1198,15 +1314,8 @@ static int __init macb_probe(struct platform_device *pdev)
1198 dev->base_addr = regs->start; 1314 dev->base_addr = regs->start;
1199 1315
1200 /* Set MII management clock divider */ 1316 /* Set MII management clock divider */
1201 pclk_hz = clk_get_rate(bp->pclk); 1317 config = macb_mdc_clk_div(bp);
1202 if (pclk_hz <= 20000000) 1318 config |= macb_dbw(bp);
1203 config = MACB_BF(CLK, MACB_CLK_DIV8);
1204 else if (pclk_hz <= 40000000)
1205 config = MACB_BF(CLK, MACB_CLK_DIV16);
1206 else if (pclk_hz <= 80000000)
1207 config = MACB_BF(CLK, MACB_CLK_DIV32);
1208 else
1209 config = MACB_BF(CLK, MACB_CLK_DIV64);
1210 macb_writel(bp, NCFGR, config); 1319 macb_writel(bp, NCFGR, config);
1211 1320
1212 macb_get_hwaddr(bp); 1321 macb_get_hwaddr(bp);
@@ -1214,15 +1323,16 @@ static int __init macb_probe(struct platform_device *pdev)
1214 1323
1215 if (pdata && pdata->is_rmii) 1324 if (pdata && pdata->is_rmii)
1216#if defined(CONFIG_ARCH_AT91) 1325#if defined(CONFIG_ARCH_AT91)
1217 macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) ); 1326 macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
1327 MACB_BIT(CLKEN)));
1218#else 1328#else
1219 macb_writel(bp, USRIO, 0); 1329 macb_or_gem_writel(bp, USRIO, 0);
1220#endif 1330#endif
1221 else 1331 else
1222#if defined(CONFIG_ARCH_AT91) 1332#if defined(CONFIG_ARCH_AT91)
1223 macb_writel(bp, USRIO, MACB_BIT(CLKEN)); 1333 macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
1224#else 1334#else
1225 macb_writel(bp, USRIO, MACB_BIT(MII)); 1335 macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
1226#endif 1336#endif
1227 1337
1228 bp->tx_pending = DEF_TX_RING_PENDING; 1338 bp->tx_pending = DEF_TX_RING_PENDING;
@@ -1239,13 +1349,13 @@ static int __init macb_probe(struct platform_device *pdev)
1239 1349
1240 platform_set_drvdata(pdev, dev); 1350 platform_set_drvdata(pdev, dev);
1241 1351
1242 printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d (%pM)\n", 1352 netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
1243 dev->name, dev->base_addr, dev->irq, dev->dev_addr); 1353 macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
1354 dev->irq, dev->dev_addr);
1244 1355
1245 phydev = bp->phy_dev; 1356 phydev = bp->phy_dev;
1246 printk(KERN_INFO "%s: attached PHY driver [%s] " 1357 netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
1247 "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, 1358 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
1248 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
1249 1359
1250 return 0; 1360 return 0;
1251 1361
@@ -1256,14 +1366,10 @@ err_out_free_irq:
1256err_out_iounmap: 1366err_out_iounmap:
1257 iounmap(bp->regs); 1367 iounmap(bp->regs);
1258err_out_disable_clocks: 1368err_out_disable_clocks:
1259#ifndef CONFIG_ARCH_AT91
1260 clk_disable(bp->hclk); 1369 clk_disable(bp->hclk);
1261 clk_put(bp->hclk); 1370 clk_put(bp->hclk);
1262#endif
1263 clk_disable(bp->pclk); 1371 clk_disable(bp->pclk);
1264#ifndef CONFIG_ARCH_AT91
1265err_out_put_pclk: 1372err_out_put_pclk:
1266#endif
1267 clk_put(bp->pclk); 1373 clk_put(bp->pclk);
1268err_out_free_dev: 1374err_out_free_dev:
1269 free_netdev(dev); 1375 free_netdev(dev);
@@ -1289,10 +1395,8 @@ static int __exit macb_remove(struct platform_device *pdev)
1289 unregister_netdev(dev); 1395 unregister_netdev(dev);
1290 free_irq(dev->irq, dev); 1396 free_irq(dev->irq, dev);
1291 iounmap(bp->regs); 1397 iounmap(bp->regs);
1292#ifndef CONFIG_ARCH_AT91
1293 clk_disable(bp->hclk); 1398 clk_disable(bp->hclk);
1294 clk_put(bp->hclk); 1399 clk_put(bp->hclk);
1295#endif
1296 clk_disable(bp->pclk); 1400 clk_disable(bp->pclk);
1297 clk_put(bp->pclk); 1401 clk_put(bp->pclk);
1298 free_netdev(dev); 1402 free_netdev(dev);
@@ -1310,9 +1414,7 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
1310 1414
1311 netif_device_detach(netdev); 1415 netif_device_detach(netdev);
1312 1416
1313#ifndef CONFIG_ARCH_AT91
1314 clk_disable(bp->hclk); 1417 clk_disable(bp->hclk);
1315#endif
1316 clk_disable(bp->pclk); 1418 clk_disable(bp->pclk);
1317 1419
1318 return 0; 1420 return 0;
@@ -1324,9 +1426,7 @@ static int macb_resume(struct platform_device *pdev)
1324 struct macb *bp = netdev_priv(netdev); 1426 struct macb *bp = netdev_priv(netdev);
1325 1427
1326 clk_enable(bp->pclk); 1428 clk_enable(bp->pclk);
1327#ifndef CONFIG_ARCH_AT91
1328 clk_enable(bp->hclk); 1429 clk_enable(bp->hclk);
1329#endif
1330 1430
1331 netif_device_attach(netdev); 1431 netif_device_attach(netdev);
1332 1432
@@ -1361,6 +1461,6 @@ module_init(macb_init);
1361module_exit(macb_exit); 1461module_exit(macb_exit);
1362 1462
1363MODULE_LICENSE("GPL"); 1463MODULE_LICENSE("GPL");
1364MODULE_DESCRIPTION("Atmel MACB Ethernet driver"); 1464MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
1365MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 1465MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1366MODULE_ALIAS("platform:macb"); 1466MODULE_ALIAS("platform:macb");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d3212f6db703..193107884a5a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -59,6 +59,24 @@
59#define MACB_TPQ 0x00bc 59#define MACB_TPQ 0x00bc
60#define MACB_USRIO 0x00c0 60#define MACB_USRIO 0x00c0
61#define MACB_WOL 0x00c4 61#define MACB_WOL 0x00c4
62#define MACB_MID 0x00fc
63
64/* GEM register offsets. */
65#define GEM_NCFGR 0x0004
66#define GEM_USRIO 0x000c
67#define GEM_DMACFG 0x0010
68#define GEM_HRB 0x0080
69#define GEM_HRT 0x0084
70#define GEM_SA1B 0x0088
71#define GEM_SA1T 0x008C
72#define GEM_OTX 0x0100
73#define GEM_DCFG1 0x0280
74#define GEM_DCFG2 0x0284
75#define GEM_DCFG3 0x0288
76#define GEM_DCFG4 0x028c
77#define GEM_DCFG5 0x0290
78#define GEM_DCFG6 0x0294
79#define GEM_DCFG7 0x0298
62 80
63/* Bitfields in NCR */ 81/* Bitfields in NCR */
64#define MACB_LB_OFFSET 0 82#define MACB_LB_OFFSET 0
@@ -126,6 +144,21 @@
126#define MACB_IRXFCS_OFFSET 19 144#define MACB_IRXFCS_OFFSET 19
127#define MACB_IRXFCS_SIZE 1 145#define MACB_IRXFCS_SIZE 1
128 146
147/* GEM specific NCFGR bitfields. */
148#define GEM_CLK_OFFSET 18
149#define GEM_CLK_SIZE 3
150#define GEM_DBW_OFFSET 21
151#define GEM_DBW_SIZE 2
152
153/* Constants for data bus width. */
154#define GEM_DBW32 0
155#define GEM_DBW64 1
156#define GEM_DBW128 2
157
158/* Bitfields in DMACFG. */
159#define GEM_RXBS_OFFSET 16
160#define GEM_RXBS_SIZE 8
161
129/* Bitfields in NSR */ 162/* Bitfields in NSR */
130#define MACB_NSR_LINK_OFFSET 0 163#define MACB_NSR_LINK_OFFSET 0
131#define MACB_NSR_LINK_SIZE 1 164#define MACB_NSR_LINK_SIZE 1
@@ -228,12 +261,30 @@
228#define MACB_WOL_MTI_OFFSET 19 261#define MACB_WOL_MTI_OFFSET 19
229#define MACB_WOL_MTI_SIZE 1 262#define MACB_WOL_MTI_SIZE 1
230 263
264/* Bitfields in MID */
265#define MACB_IDNUM_OFFSET 16
266#define MACB_IDNUM_SIZE 16
267#define MACB_REV_OFFSET 0
268#define MACB_REV_SIZE 16
269
270/* Bitfields in DCFG1. */
271#define GEM_DBWDEF_OFFSET 25
272#define GEM_DBWDEF_SIZE 3
273
231/* Constants for CLK */ 274/* Constants for CLK */
232#define MACB_CLK_DIV8 0 275#define MACB_CLK_DIV8 0
233#define MACB_CLK_DIV16 1 276#define MACB_CLK_DIV16 1
234#define MACB_CLK_DIV32 2 277#define MACB_CLK_DIV32 2
235#define MACB_CLK_DIV64 3 278#define MACB_CLK_DIV64 3
236 279
280/* GEM specific constants for CLK. */
281#define GEM_CLK_DIV8 0
282#define GEM_CLK_DIV16 1
283#define GEM_CLK_DIV32 2
284#define GEM_CLK_DIV48 3
285#define GEM_CLK_DIV64 4
286#define GEM_CLK_DIV96 5
287
237/* Constants for MAN register */ 288/* Constants for MAN register */
238#define MACB_MAN_SOF 1 289#define MACB_MAN_SOF 1
239#define MACB_MAN_WRITE 1 290#define MACB_MAN_WRITE 1
@@ -254,11 +305,52 @@
254 << MACB_##name##_OFFSET)) \ 305 << MACB_##name##_OFFSET)) \
255 | MACB_BF(name,value)) 306 | MACB_BF(name,value))
256 307
308#define GEM_BIT(name) \
309 (1 << GEM_##name##_OFFSET)
310#define GEM_BF(name, value) \
311 (((value) & ((1 << GEM_##name##_SIZE) - 1)) \
312 << GEM_##name##_OFFSET)
313#define GEM_BFEXT(name, value)\
314 (((value) >> GEM_##name##_OFFSET) \
315 & ((1 << GEM_##name##_SIZE) - 1))
316#define GEM_BFINS(name, value, old) \
317 (((old) & ~(((1 << GEM_##name##_SIZE) - 1) \
318 << GEM_##name##_OFFSET)) \
319 | GEM_BF(name, value))
320
257/* Register access macros */ 321/* Register access macros */
258#define macb_readl(port,reg) \ 322#define macb_readl(port,reg) \
259 __raw_readl((port)->regs + MACB_##reg) 323 __raw_readl((port)->regs + MACB_##reg)
260#define macb_writel(port,reg,value) \ 324#define macb_writel(port,reg,value) \
261 __raw_writel((value), (port)->regs + MACB_##reg) 325 __raw_writel((value), (port)->regs + MACB_##reg)
326#define gem_readl(port, reg) \
327 __raw_readl((port)->regs + GEM_##reg)
328#define gem_writel(port, reg, value) \
329 __raw_writel((value), (port)->regs + GEM_##reg)
330
331/*
332 * Conditional GEM/MACB macros. These perform the operation to the correct
333 * register dependent on whether the device is a GEM or a MACB. For registers
334 * and bitfields that are common across both devices, use macb_{read,write}l
335 * to avoid the cost of the conditional.
336 */
337#define macb_or_gem_writel(__bp, __reg, __value) \
338 ({ \
339 if (macb_is_gem((__bp))) \
340 gem_writel((__bp), __reg, __value); \
341 else \
342 macb_writel((__bp), __reg, __value); \
343 })
344
345#define macb_or_gem_readl(__bp, __reg) \
346 ({ \
347 u32 __v; \
348 if (macb_is_gem((__bp))) \
349 __v = gem_readl((__bp), __reg); \
350 else \
351 __v = macb_readl((__bp), __reg); \
352 __v; \
353 })
262 354
263struct dma_desc { 355struct dma_desc {
264 u32 addr; 356 u32 addr;
@@ -358,6 +450,54 @@ struct macb_stats {
358 u32 tx_pause_frames; 450 u32 tx_pause_frames;
359}; 451};
360 452
453struct gem_stats {
454 u32 tx_octets_31_0;
455 u32 tx_octets_47_32;
456 u32 tx_frames;
457 u32 tx_broadcast_frames;
458 u32 tx_multicast_frames;
459 u32 tx_pause_frames;
460 u32 tx_64_byte_frames;
461 u32 tx_65_127_byte_frames;
462 u32 tx_128_255_byte_frames;
463 u32 tx_256_511_byte_frames;
464 u32 tx_512_1023_byte_frames;
465 u32 tx_1024_1518_byte_frames;
466 u32 tx_greater_than_1518_byte_frames;
467 u32 tx_underrun;
468 u32 tx_single_collision_frames;
469 u32 tx_multiple_collision_frames;
470 u32 tx_excessive_collisions;
471 u32 tx_late_collisions;
472 u32 tx_deferred_frames;
473 u32 tx_carrier_sense_errors;
474 u32 rx_octets_31_0;
475 u32 rx_octets_47_32;
476 u32 rx_frames;
477 u32 rx_broadcast_frames;
478 u32 rx_multicast_frames;
479 u32 rx_pause_frames;
480 u32 rx_64_byte_frames;
481 u32 rx_65_127_byte_frames;
482 u32 rx_128_255_byte_frames;
483 u32 rx_256_511_byte_frames;
484 u32 rx_512_1023_byte_frames;
485 u32 rx_1024_1518_byte_frames;
486 u32 rx_greater_than_1518_byte_frames;
487 u32 rx_undersized_frames;
488 u32 rx_oversize_frames;
489 u32 rx_jabbers;
490 u32 rx_frame_check_sequence_errors;
491 u32 rx_length_field_frame_errors;
492 u32 rx_symbol_errors;
493 u32 rx_alignment_errors;
494 u32 rx_resource_errors;
495 u32 rx_overruns;
496 u32 rx_ip_header_checksum_errors;
497 u32 rx_tcp_checksum_errors;
498 u32 rx_udp_checksum_errors;
499};
500
361struct macb { 501struct macb {
362 void __iomem *regs; 502 void __iomem *regs;
363 503
@@ -376,7 +516,10 @@ struct macb {
376 struct net_device *dev; 516 struct net_device *dev;
377 struct napi_struct napi; 517 struct napi_struct napi;
378 struct net_device_stats stats; 518 struct net_device_stats stats;
379 struct macb_stats hw_stats; 519 union {
520 struct macb_stats macb;
521 struct gem_stats gem;
522 } hw_stats;
380 523
381 dma_addr_t rx_ring_dma; 524 dma_addr_t rx_ring_dma;
382 dma_addr_t tx_ring_dma; 525 dma_addr_t tx_ring_dma;
@@ -391,4 +534,9 @@ struct macb {
391 unsigned int duplex; 534 unsigned int duplex;
392}; 535};
393 536
537static inline bool macb_is_gem(struct macb *bp)
538{
539 return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
540}
541
394#endif /* _MACB_H */ 542#endif /* _MACB_H */
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 1cf671643d1f..c520cfd3b298 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -7,8 +7,7 @@ config NET_VENDOR_FREESCALE
7 default y 7 default y
8 depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ 8 depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
9 M523x || M527x || M5272 || M528x || M520x || M532x || \ 9 M523x || M527x || M5272 || M528x || M520x || M532x || \
10 ARCH_MXC || ARCH_MXS || \ 10 ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
11 (PPC_MPC52xx && PPC_BESTCOMM)
12 ---help--- 11 ---help---
13 If you have a network (Ethernet) card belonging to this class, say Y 12 If you have a network (Ethernet) card belonging to this class, say Y
14 and read the Ethernet-HOWTO, available from 13 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 61029dc7fa6f..76213162fbe3 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -5,7 +5,11 @@
5config NET_VENDOR_INTEL 5config NET_VENDOR_INTEL
6 bool "Intel devices" 6 bool "Intel devices"
7 default y 7 default y
8 depends on PCI || PCI_MSI 8 depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \
9 ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
10 GSC || BVME6000 || MVME16x || ARCH_ENP2611 || \
11 (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \
12 EXPERIMENTAL
9 ---help--- 13 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 14 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 15 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index db95731863d7..00fcd39ad666 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -442,12 +442,14 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
442 442
443int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter) 443int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
444{ 444{
445#ifdef CONFIG_PCI_IOV
445 int i; 446 int i;
446 for (i = 0; i < adapter->num_vfs; i++) { 447 for (i = 0; i < adapter->num_vfs; i++) {
447 if (adapter->vfinfo[i].vfdev->dev_flags & 448 if (adapter->vfinfo[i].vfdev->dev_flags &
448 PCI_DEV_FLAGS_ASSIGNED) 449 PCI_DEV_FLAGS_ASSIGNED)
449 return true; 450 return true;
450 } 451 }
452#endif
451 return false; 453 return false;
452} 454}
453 455
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 4a5d8897faab..df04f1a3857c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -42,11 +42,11 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
42int ixgbe_ndo_get_vf_config(struct net_device *netdev, 42int ixgbe_ndo_get_vf_config(struct net_device *netdev,
43 int vf, struct ifla_vf_info *ivi); 43 int vf, struct ifla_vf_info *ivi);
44void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); 44void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
45#ifdef CONFIG_PCI_IOV
46void ixgbe_disable_sriov(struct ixgbe_adapter *adapter); 45void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
46int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
47#ifdef CONFIG_PCI_IOV
47void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, 48void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
48 const struct ixgbe_info *ii); 49 const struct ixgbe_info *ii);
49int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
50#endif 50#endif
51 51
52 52
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cbd026f3bc57..fdc6c394c683 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -366,17 +366,6 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
366 gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec); 366 gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
367 } 367 }
368 } else { 368 } else {
369 if (hw->chip_id >= CHIP_ID_YUKON_OPT) {
370 u16 ctrl2 = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL_2);
371
372 /* enable PHY Reverse Auto-Negotiation */
373 ctrl2 |= 1u << 13;
374
375 /* Write PHY changes (SW-reset must follow) */
376 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL_2, ctrl2);
377 }
378
379
380 /* disable energy detect */ 369 /* disable energy detect */
381 ctrl &= ~PHY_M_PC_EN_DET_MSK; 370 ctrl &= ~PHY_M_PC_EN_DET_MSK;
382 371
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index 4a6b9fd073b6..eb836f770f50 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -5,7 +5,10 @@
5config NET_VENDOR_NATSEMI 5config NET_VENDOR_NATSEMI
6 bool "National Semi-conductor devices" 6 bool "National Semi-conductor devices"
7 default y 7 default y
8 depends on MCA || MAC || MACH_JAZZ || PCI || XTENSA_PLATFORM_XT2000 8 depends on AMIGA_PCMCIA || ARM || EISA || EXPERIMENTAL || H8300 || \
9 ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MCA || \
10 MCA_LEGACY || MIPS || PCI || PCMCIA || SUPERH || \
11 XTENSA_PLATFORM_XT2000 || ZORRO
9 ---help--- 12 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 13 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 14 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1e37eb98c4e2..1dca57013cb2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1682,6 +1682,7 @@ static void nv_get_hw_stats(struct net_device *dev)
1682 np->estats.tx_pause += readl(base + NvRegTxPause); 1682 np->estats.tx_pause += readl(base + NvRegTxPause);
1683 np->estats.rx_pause += readl(base + NvRegRxPause); 1683 np->estats.rx_pause += readl(base + NvRegRxPause);
1684 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); 1684 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1685 np->estats.rx_errors_total += np->estats.rx_drop_frame;
1685 } 1686 }
1686 1687
1687 if (np->driver_data & DEV_HAS_STATISTICS_V3) { 1688 if (np->driver_data & DEV_HAS_STATISTICS_V3) {
@@ -1706,11 +1707,14 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1706 nv_get_hw_stats(dev); 1707 nv_get_hw_stats(dev);
1707 1708
1708 /* copy to net_device stats */ 1709 /* copy to net_device stats */
1710 dev->stats.tx_packets = np->estats.tx_packets;
1711 dev->stats.rx_bytes = np->estats.rx_bytes;
1709 dev->stats.tx_bytes = np->estats.tx_bytes; 1712 dev->stats.tx_bytes = np->estats.tx_bytes;
1710 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; 1713 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
1711 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; 1714 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
1712 dev->stats.rx_crc_errors = np->estats.rx_crc_errors; 1715 dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
1713 dev->stats.rx_over_errors = np->estats.rx_over_errors; 1716 dev->stats.rx_over_errors = np->estats.rx_over_errors;
1717 dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;
1714 dev->stats.rx_errors = np->estats.rx_errors_total; 1718 dev->stats.rx_errors = np->estats.rx_errors_total;
1715 dev->stats.tx_errors = np->estats.tx_errors_total; 1719 dev->stats.tx_errors = np->estats.tx_errors_total;
1716 } 1720 }
@@ -2099,10 +2103,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2099 2103
2100 /* add fragments to entries count */ 2104 /* add fragments to entries count */
2101 for (i = 0; i < fragments; i++) { 2105 for (i = 0; i < fragments; i++) {
2102 u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2106 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2103 2107
2104 entries += (size >> NV_TX2_TSO_MAX_SHIFT) + 2108 entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2105 ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2109 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2106 } 2110 }
2107 2111
2108 spin_lock_irqsave(&np->lock, flags); 2112 spin_lock_irqsave(&np->lock, flags);
@@ -2141,13 +2145,13 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2141 /* setup the fragments */ 2145 /* setup the fragments */
2142 for (i = 0; i < fragments; i++) { 2146 for (i = 0; i < fragments; i++) {
2143 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2147 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2144 u32 size = skb_frag_size(frag); 2148 u32 frag_size = skb_frag_size(frag);
2145 offset = 0; 2149 offset = 0;
2146 2150
2147 do { 2151 do {
2148 prev_tx = put_tx; 2152 prev_tx = put_tx;
2149 prev_tx_ctx = np->put_tx_ctx; 2153 prev_tx_ctx = np->put_tx_ctx;
2150 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2154 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2151 np->put_tx_ctx->dma = skb_frag_dma_map( 2155 np->put_tx_ctx->dma = skb_frag_dma_map(
2152 &np->pci_dev->dev, 2156 &np->pci_dev->dev,
2153 frag, offset, 2157 frag, offset,
@@ -2159,12 +2163,12 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2159 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2163 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2160 2164
2161 offset += bcnt; 2165 offset += bcnt;
2162 size -= bcnt; 2166 frag_size -= bcnt;
2163 if (unlikely(put_tx++ == np->last_tx.orig)) 2167 if (unlikely(put_tx++ == np->last_tx.orig))
2164 put_tx = np->first_tx.orig; 2168 put_tx = np->first_tx.orig;
2165 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2169 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2166 np->put_tx_ctx = np->first_tx_ctx; 2170 np->put_tx_ctx = np->first_tx_ctx;
2167 } while (size); 2171 } while (frag_size);
2168 } 2172 }
2169 2173
2170 /* set last fragment flag */ 2174 /* set last fragment flag */
@@ -2213,10 +2217,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2213 2217
2214 /* add fragments to entries count */ 2218 /* add fragments to entries count */
2215 for (i = 0; i < fragments; i++) { 2219 for (i = 0; i < fragments; i++) {
2216 u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2220 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2217 2221
2218 entries += (size >> NV_TX2_TSO_MAX_SHIFT) + 2222 entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2219 ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2223 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2220 } 2224 }
2221 2225
2222 spin_lock_irqsave(&np->lock, flags); 2226 spin_lock_irqsave(&np->lock, flags);
@@ -2257,13 +2261,13 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2257 /* setup the fragments */ 2261 /* setup the fragments */
2258 for (i = 0; i < fragments; i++) { 2262 for (i = 0; i < fragments; i++) {
2259 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2263 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2260 u32 size = skb_frag_size(frag); 2264 u32 frag_size = skb_frag_size(frag);
2261 offset = 0; 2265 offset = 0;
2262 2266
2263 do { 2267 do {
2264 prev_tx = put_tx; 2268 prev_tx = put_tx;
2265 prev_tx_ctx = np->put_tx_ctx; 2269 prev_tx_ctx = np->put_tx_ctx;
2266 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2270 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2267 np->put_tx_ctx->dma = skb_frag_dma_map( 2271 np->put_tx_ctx->dma = skb_frag_dma_map(
2268 &np->pci_dev->dev, 2272 &np->pci_dev->dev,
2269 frag, offset, 2273 frag, offset,
@@ -2276,12 +2280,12 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2276 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2280 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2277 2281
2278 offset += bcnt; 2282 offset += bcnt;
2279 size -= bcnt; 2283 frag_size -= bcnt;
2280 if (unlikely(put_tx++ == np->last_tx.ex)) 2284 if (unlikely(put_tx++ == np->last_tx.ex))
2281 put_tx = np->first_tx.ex; 2285 put_tx = np->first_tx.ex;
2282 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2286 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2283 np->put_tx_ctx = np->first_tx_ctx; 2287 np->put_tx_ctx = np->first_tx_ctx;
2284 } while (size); 2288 } while (frag_size);
2285 } 2289 }
2286 2290
2287 /* set last fragment flag */ 2291 /* set last fragment flag */
@@ -2374,16 +2378,8 @@ static int nv_tx_done(struct net_device *dev, int limit)
2374 if (np->desc_ver == DESC_VER_1) { 2378 if (np->desc_ver == DESC_VER_1) {
2375 if (flags & NV_TX_LASTPACKET) { 2379 if (flags & NV_TX_LASTPACKET) {
2376 if (flags & NV_TX_ERROR) { 2380 if (flags & NV_TX_ERROR) {
2377 if (flags & NV_TX_UNDERFLOW)
2378 dev->stats.tx_fifo_errors++;
2379 if (flags & NV_TX_CARRIERLOST)
2380 dev->stats.tx_carrier_errors++;
2381 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) 2381 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
2382 nv_legacybackoff_reseed(dev); 2382 nv_legacybackoff_reseed(dev);
2383 dev->stats.tx_errors++;
2384 } else {
2385 dev->stats.tx_packets++;
2386 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2387 } 2383 }
2388 dev_kfree_skb_any(np->get_tx_ctx->skb); 2384 dev_kfree_skb_any(np->get_tx_ctx->skb);
2389 np->get_tx_ctx->skb = NULL; 2385 np->get_tx_ctx->skb = NULL;
@@ -2392,16 +2388,8 @@ static int nv_tx_done(struct net_device *dev, int limit)
2392 } else { 2388 } else {
2393 if (flags & NV_TX2_LASTPACKET) { 2389 if (flags & NV_TX2_LASTPACKET) {
2394 if (flags & NV_TX2_ERROR) { 2390 if (flags & NV_TX2_ERROR) {
2395 if (flags & NV_TX2_UNDERFLOW)
2396 dev->stats.tx_fifo_errors++;
2397 if (flags & NV_TX2_CARRIERLOST)
2398 dev->stats.tx_carrier_errors++;
2399 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) 2391 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
2400 nv_legacybackoff_reseed(dev); 2392 nv_legacybackoff_reseed(dev);
2401 dev->stats.tx_errors++;
2402 } else {
2403 dev->stats.tx_packets++;
2404 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2405 } 2393 }
2406 dev_kfree_skb_any(np->get_tx_ctx->skb); 2394 dev_kfree_skb_any(np->get_tx_ctx->skb);
2407 np->get_tx_ctx->skb = NULL; 2395 np->get_tx_ctx->skb = NULL;
@@ -2434,9 +2422,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2434 nv_unmap_txskb(np, np->get_tx_ctx); 2422 nv_unmap_txskb(np, np->get_tx_ctx);
2435 2423
2436 if (flags & NV_TX2_LASTPACKET) { 2424 if (flags & NV_TX2_LASTPACKET) {
2437 if (!(flags & NV_TX2_ERROR)) 2425 if (flags & NV_TX2_ERROR) {
2438 dev->stats.tx_packets++;
2439 else {
2440 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) { 2426 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2441 if (np->driver_data & DEV_HAS_GEAR_MODE) 2427 if (np->driver_data & DEV_HAS_GEAR_MODE)
2442 nv_gear_backoff_reseed(dev); 2428 nv_gear_backoff_reseed(dev);
@@ -2636,7 +2622,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2636 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) { 2622 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2637 len = nv_getlen(dev, skb->data, len); 2623 len = nv_getlen(dev, skb->data, len);
2638 if (len < 0) { 2624 if (len < 0) {
2639 dev->stats.rx_errors++;
2640 dev_kfree_skb(skb); 2625 dev_kfree_skb(skb);
2641 goto next_pkt; 2626 goto next_pkt;
2642 } 2627 }
@@ -2650,11 +2635,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2650 else { 2635 else {
2651 if (flags & NV_RX_MISSEDFRAME) 2636 if (flags & NV_RX_MISSEDFRAME)
2652 dev->stats.rx_missed_errors++; 2637 dev->stats.rx_missed_errors++;
2653 if (flags & NV_RX_CRCERR)
2654 dev->stats.rx_crc_errors++;
2655 if (flags & NV_RX_OVERFLOW)
2656 dev->stats.rx_over_errors++;
2657 dev->stats.rx_errors++;
2658 dev_kfree_skb(skb); 2638 dev_kfree_skb(skb);
2659 goto next_pkt; 2639 goto next_pkt;
2660 } 2640 }
@@ -2670,7 +2650,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2670 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { 2650 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2671 len = nv_getlen(dev, skb->data, len); 2651 len = nv_getlen(dev, skb->data, len);
2672 if (len < 0) { 2652 if (len < 0) {
2673 dev->stats.rx_errors++;
2674 dev_kfree_skb(skb); 2653 dev_kfree_skb(skb);
2675 goto next_pkt; 2654 goto next_pkt;
2676 } 2655 }
@@ -2682,11 +2661,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2682 } 2661 }
2683 /* the rest are hard errors */ 2662 /* the rest are hard errors */
2684 else { 2663 else {
2685 if (flags & NV_RX2_CRCERR)
2686 dev->stats.rx_crc_errors++;
2687 if (flags & NV_RX2_OVERFLOW)
2688 dev->stats.rx_over_errors++;
2689 dev->stats.rx_errors++;
2690 dev_kfree_skb(skb); 2664 dev_kfree_skb(skb);
2691 goto next_pkt; 2665 goto next_pkt;
2692 } 2666 }
@@ -2704,7 +2678,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2704 skb->protocol = eth_type_trans(skb, dev); 2678 skb->protocol = eth_type_trans(skb, dev);
2705 napi_gro_receive(&np->napi, skb); 2679 napi_gro_receive(&np->napi, skb);
2706 dev->stats.rx_packets++; 2680 dev->stats.rx_packets++;
2707 dev->stats.rx_bytes += len;
2708next_pkt: 2681next_pkt:
2709 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2682 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2710 np->get_rx.orig = np->first_rx.orig; 2683 np->get_rx.orig = np->first_rx.orig;
@@ -2787,9 +2760,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2787 __vlan_hwaccel_put_tag(skb, vid); 2760 __vlan_hwaccel_put_tag(skb, vid);
2788 } 2761 }
2789 napi_gro_receive(&np->napi, skb); 2762 napi_gro_receive(&np->napi, skb);
2790
2791 dev->stats.rx_packets++; 2763 dev->stats.rx_packets++;
2792 dev->stats.rx_bytes += len;
2793 } else { 2764 } else {
2794 dev_kfree_skb(skb); 2765 dev_kfree_skb(skb);
2795 } 2766 }
@@ -2962,11 +2933,11 @@ static void nv_set_multicast(struct net_device *dev)
2962 struct netdev_hw_addr *ha; 2933 struct netdev_hw_addr *ha;
2963 2934
2964 netdev_for_each_mc_addr(ha, dev) { 2935 netdev_for_each_mc_addr(ha, dev) {
2965 unsigned char *addr = ha->addr; 2936 unsigned char *hw_addr = ha->addr;
2966 u32 a, b; 2937 u32 a, b;
2967 2938
2968 a = le32_to_cpu(*(__le32 *) addr); 2939 a = le32_to_cpu(*(__le32 *) hw_addr);
2969 b = le16_to_cpu(*(__le16 *) (&addr[4])); 2940 b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
2970 alwaysOn[0] &= a; 2941 alwaysOn[0] &= a;
2971 alwaysOff[0] &= ~a; 2942 alwaysOff[0] &= ~a;
2972 alwaysOn[1] &= b; 2943 alwaysOn[1] &= b;
@@ -3398,7 +3369,8 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3398 3369
3399 for (i = 0;; i++) { 3370 for (i = 0;; i++) {
3400 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3371 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3401 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3372 writel(events, base + NvRegMSIXIrqStatus);
3373 netdev_dbg(dev, "tx irq events: %08x\n", events);
3402 if (!(events & np->irqmask)) 3374 if (!(events & np->irqmask))
3403 break; 3375 break;
3404 3376
@@ -3509,7 +3481,8 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3509 3481
3510 for (i = 0;; i++) { 3482 for (i = 0;; i++) {
3511 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3483 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3512 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3484 writel(events, base + NvRegMSIXIrqStatus);
3485 netdev_dbg(dev, "rx irq events: %08x\n", events);
3513 if (!(events & np->irqmask)) 3486 if (!(events & np->irqmask))
3514 break; 3487 break;
3515 3488
@@ -3553,7 +3526,8 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
3553 3526
3554 for (i = 0;; i++) { 3527 for (i = 0;; i++) {
3555 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3528 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3556 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3529 writel(events, base + NvRegMSIXIrqStatus);
3530 netdev_dbg(dev, "irq events: %08x\n", events);
3557 if (!(events & np->irqmask)) 3531 if (!(events & np->irqmask))
3558 break; 3532 break;
3559 3533
@@ -3617,10 +3591,10 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
3617 3591
3618 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3592 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3619 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3593 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3620 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3594 writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3621 } else { 3595 } else {
3622 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3596 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3623 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3597 writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3624 } 3598 }
3625 pci_push(base); 3599 pci_push(base);
3626 if (!(events & NVREG_IRQ_TIMER)) 3600 if (!(events & NVREG_IRQ_TIMER))
@@ -4566,7 +4540,7 @@ static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *e
4566 struct fe_priv *np = netdev_priv(dev); 4540 struct fe_priv *np = netdev_priv(dev);
4567 4541
4568 /* update stats */ 4542 /* update stats */
4569 nv_do_stats_poll((unsigned long)dev); 4543 nv_get_hw_stats(dev);
4570 4544
4571 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); 4545 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
4572} 4546}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a3ce3d4561ed..74134970b709 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -192,6 +192,13 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
192 */ 192 */
193 macvlan_broadcast(skb, port, src->dev, 193 macvlan_broadcast(skb, port, src->dev,
194 MACVLAN_MODE_VEPA); 194 MACVLAN_MODE_VEPA);
195 else {
196 /* forward to original port. */
197 vlan = src;
198 ret = macvlan_broadcast_one(skb, vlan, eth, 0);
199 goto out;
200 }
201
195 return RX_HANDLER_PASS; 202 return RX_HANDLER_PASS;
196 } 203 }
197 204
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7d6082160bcc..fae0fbd8bc88 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1057,7 +1057,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1057 unsigned long flags; 1057 unsigned long flags;
1058 int retval; 1058 int retval;
1059 1059
1060 skb_tx_timestamp(skb); 1060 if (skb)
1061 skb_tx_timestamp(skb);
1061 1062
1062 // some devices want funky USB-level framing, for 1063 // some devices want funky USB-level framing, for
1063 // win32 driver (usually) and/or hardware quirks 1064 // win32 driver (usually) and/or hardware quirks
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index e0ab0657cc3a..88279e325dca 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -868,10 +868,6 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
868 /* Do PA Calibration */ 868 /* Do PA Calibration */
869 ar9002_hw_pa_cal(ah, true); 869 ar9002_hw_pa_cal(ah, true);
870 870
871 /* Do NF Calibration after DC offset and other calibrations */
872 ath9k_hw_loadnf(ah, chan);
873 ath9k_hw_start_nfcal(ah, true);
874
875 if (ah->caldata) 871 if (ah->caldata)
876 ah->caldata->nfcal_pending = true; 872 ah->caldata->nfcal_pending = true;
877 873
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 16851cb109a6..12a730dcb500 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -908,12 +908,15 @@ static bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
908 int i; 908 int i;
909 bool restore; 909 bool restore;
910 910
911 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT) || !ah->caldata) 911 if (!ah->caldata)
912 return false; 912 return false;
913 913
914 hist = &ah->caldata->rtt_hist; 914 hist = &ah->caldata->rtt_hist;
915 if (!hist->num_readings)
916 return false;
917
915 ar9003_hw_rtt_enable(ah); 918 ar9003_hw_rtt_enable(ah);
916 ar9003_hw_rtt_set_mask(ah, 0x10); 919 ar9003_hw_rtt_set_mask(ah, 0x00);
917 for (i = 0; i < AR9300_MAX_CHAINS; i++) { 920 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
918 if (!(ah->rxchainmask & (1 << i))) 921 if (!(ah->rxchainmask & (1 << i)))
919 continue; 922 continue;
@@ -1070,6 +1073,7 @@ skip_tx_iqcal:
1070 if (is_reusable && (hist->num_readings < RTT_HIST_MAX)) { 1073 if (is_reusable && (hist->num_readings < RTT_HIST_MAX)) {
1071 u32 *table; 1074 u32 *table;
1072 1075
1076 hist->num_readings++;
1073 for (i = 0; i < AR9300_MAX_CHAINS; i++) { 1077 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
1074 if (!(ah->rxchainmask & (1 << i))) 1078 if (!(ah->rxchainmask & (1 << i)))
1075 continue; 1079 continue;
@@ -1081,9 +1085,6 @@ skip_tx_iqcal:
1081 ar9003_hw_rtt_disable(ah); 1085 ar9003_hw_rtt_disable(ah);
1082 } 1086 }
1083 1087
1084 ath9k_hw_loadnf(ah, chan);
1085 ath9k_hw_start_nfcal(ah, true);
1086
1087 /* Initialize list pointers */ 1088 /* Initialize list pointers */
1088 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; 1089 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
1089 ah->supp_cals = IQ_MISMATCH_CAL; 1090 ah->supp_cals = IQ_MISMATCH_CAL;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 2f4023e66081..4114fe752c6b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -572,14 +572,14 @@
572 572
573#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300) 573#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300)
574 574
575#define AR_PHY_TX_IQCAL_CONTROL_0 (AR_SM_BASE + AR_SREV_9485(ah) ? \ 575#define AR_PHY_TX_IQCAL_CONTROL_0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \
576 0x3c4 : 0x444) 576 0x3c4 : 0x444))
577#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + AR_SREV_9485(ah) ? \ 577#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + (AR_SREV_9485(ah) ? \
578 0x3c8 : 0x448) 578 0x3c8 : 0x448))
579#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + AR_SREV_9485(ah) ? \ 579#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + (AR_SREV_9485(ah) ? \
580 0x3c4 : 0x440) 580 0x3c4 : 0x440))
581#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + AR_SREV_9485(ah) ? \ 581#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \
582 0x3f0 : 0x48c) 582 0x3f0 : 0x48c))
583#define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \ 583#define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \
584 (AR_SREV_9485(ah) ? \ 584 (AR_SREV_9485(ah) ? \
585 0x3d0 : 0x450) + ((_i) << 2)) 585 0x3d0 : 0x450) + ((_i) << 2))
@@ -651,7 +651,7 @@
651#define AR_SWITCH_TABLE_ALL_S (0) 651#define AR_SWITCH_TABLE_ALL_S (0)
652 652
653#define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\ 653#define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\
654 (AR_SREV_9485(ah) ? 0x1628c : 0x16294)) 654 (AR_SREV_9462(ah) ? 0x16294 : 0x1628c))
655 655
656#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000 656#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
657#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31 657#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
@@ -668,12 +668,12 @@
668#define AR_PHY_65NM_CH2_RXTX2 0x16904 668#define AR_PHY_65NM_CH2_RXTX2 0x16904
669 669
670#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \ 670#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
671 (AR_SREV_9485(ah) ? 0x16284 : 0x16290)) 671 (AR_SREV_9462(ah) ? 0x16290 : 0x16284))
672#define AR_CH0_TOP2_XPABIASLVL 0xf000 672#define AR_CH0_TOP2_XPABIASLVL 0xf000
673#define AR_CH0_TOP2_XPABIASLVL_S 12 673#define AR_CH0_TOP2_XPABIASLVL_S 12
674 674
675#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \ 675#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
676 (AR_SREV_9485(ah) ? 0x16290 : 0x16298)) 676 (AR_SREV_9462(ah) ? 0x16298 : 0x16290))
677#define AR_CH0_XTAL_CAPINDAC 0x7f000000 677#define AR_CH0_XTAL_CAPINDAC 0x7f000000
678#define AR_CH0_XTAL_CAPINDAC_S 24 678#define AR_CH0_XTAL_CAPINDAC_S 24
679#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000 679#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000
@@ -908,8 +908,8 @@
908#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208) 908#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
909#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c) 909#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
910#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220) 910#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
911#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_AR9300(ah) ? \ 911#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_AR9462(ah) ? \
912 0x240 : 0x280)) 912 0x280 : 0x240))
913#define AR_PHY_TPC_19_B1 (AR_SM1_BASE + 0x240) 913#define AR_PHY_TPC_19_B1 (AR_SM1_BASE + 0x240)
914#define AR_PHY_TPC_19_B1_ALPHA_THERM 0xff 914#define AR_PHY_TPC_19_B1_ALPHA_THERM 0xff
915#define AR_PHY_TPC_19_B1_ALPHA_THERM_S 0 915#define AR_PHY_TPC_19_B1_ALPHA_THERM_S 0
@@ -931,10 +931,10 @@
931#define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0) 931#define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0)
932#define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4) 932#define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4)
933 933
934#define AR_PHY_RTT_TABLE_SW_INTF_B(i) (0x384 + (i) ? \ 934#define AR_PHY_RTT_TABLE_SW_INTF_B(i) (0x384 + ((i) ? \
935 AR_SM1_BASE : AR_SM_BASE) 935 AR_SM1_BASE : AR_SM_BASE))
936#define AR_PHY_RTT_TABLE_SW_INTF_1_B(i) (0x388 + (i) ? \ 936#define AR_PHY_RTT_TABLE_SW_INTF_1_B(i) (0x388 + ((i) ? \
937 AR_SM1_BASE : AR_SM_BASE) 937 AR_SM1_BASE : AR_SM_BASE))
938/* 938/*
939 * Channel 2 Register Map 939 * Channel 2 Register Map
940 */ 940 */
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 611ea6ce8508..d16d029f81a9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -521,7 +521,7 @@ static const u32 ar9485_1_1_radio_postamble[][2] = {
521 {0x000160ac, 0x24611800}, 521 {0x000160ac, 0x24611800},
522 {0x000160b0, 0x03284f3e}, 522 {0x000160b0, 0x03284f3e},
523 {0x0001610c, 0x00170000}, 523 {0x0001610c, 0x00170000},
524 {0x00016140, 0x10804008}, 524 {0x00016140, 0x50804008},
525}; 525};
526 526
527static const u32 ar9485_1_1_mac_postamble[][5] = { 527static const u32 ar9485_1_1_mac_postamble[][5] = {
@@ -603,7 +603,7 @@ static const u32 ar9485_1_1_radio_core[][2] = {
603 603
604static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = { 604static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
605 /* Addr allmodes */ 605 /* Addr allmodes */
606 {0x00018c00, 0x10052e5e}, 606 {0x00018c00, 0x18052e5e},
607 {0x00018c04, 0x000801d8}, 607 {0x00018c04, 0x000801d8},
608 {0x00018c08, 0x0000080c}, 608 {0x00018c08, 0x0000080c},
609}; 609};
@@ -776,7 +776,7 @@ static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = {
776 776
777static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = { 777static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
778 /* Addr allmodes */ 778 /* Addr allmodes */
779 {0x00018c00, 0x10013e5e}, 779 {0x00018c00, 0x18013e5e},
780 {0x00018c04, 0x000801d8}, 780 {0x00018c04, 0x000801d8},
781 {0x00018c08, 0x0000080c}, 781 {0x00018c08, 0x0000080c},
782}; 782};
@@ -882,7 +882,7 @@ static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
882 882
883static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = { 883static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
884 /* Addr allmodes */ 884 /* Addr allmodes */
885 {0x00018c00, 0x10012e5e}, 885 {0x00018c00, 0x18012e5e},
886 {0x00018c04, 0x000801d8}, 886 {0x00018c04, 0x000801d8},
887 {0x00018c08, 0x0000080c}, 887 {0x00018c08, 0x0000080c},
888}; 888};
@@ -1021,7 +1021,7 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
1021 1021
1022static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = { 1022static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
1023 /* Addr allmodes */ 1023 /* Addr allmodes */
1024 {0x00018c00, 0x10053e5e}, 1024 {0x00018c00, 0x18053e5e},
1025 {0x00018c04, 0x000801d8}, 1025 {0x00018c04, 0x000801d8},
1026 {0x00018c08, 0x0000080c}, 1026 {0x00018c08, 0x0000080c},
1027}; 1027};
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 4952ad8c4e8c..2f91acccb7db 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1725,6 +1725,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1725 if (!ath9k_hw_init_cal(ah, chan)) 1725 if (!ath9k_hw_init_cal(ah, chan))
1726 return -EIO; 1726 return -EIO;
1727 1727
1728 ath9k_hw_loadnf(ah, chan);
1729 ath9k_hw_start_nfcal(ah, true);
1730
1728 ENABLE_REGWRITE_BUFFER(ah); 1731 ENABLE_REGWRITE_BUFFER(ah);
1729 1732
1730 ath9k_hw_restore_chainmask(ah); 1733 ath9k_hw_restore_chainmask(ah);
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index d20946939cd8..59472e1605cd 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -296,7 +296,8 @@ static void carl9170_tx_release(struct kref *ref)
296 super = (void *)skb->data; 296 super = (void *)skb->data;
297 txinfo->status.ampdu_len = super->s.rix; 297 txinfo->status.ampdu_len = super->s.rix;
298 txinfo->status.ampdu_ack_len = super->s.cnt; 298 txinfo->status.ampdu_ack_len = super->s.cnt;
299 } else if (txinfo->flags & IEEE80211_TX_STAT_ACK) { 299 } else if ((txinfo->flags & IEEE80211_TX_STAT_ACK) &&
300 !(txinfo->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
300 /* 301 /*
301 * drop redundant tx_status reports: 302 * drop redundant tx_status reports:
302 * 303 *
@@ -308,15 +309,17 @@ static void carl9170_tx_release(struct kref *ref)
308 * 309 *
309 * 3. minstrel_ht is picky, it only accepts 310 * 3. minstrel_ht is picky, it only accepts
310 * reports of frames with the TX_STATUS_AMPDU flag. 311 * reports of frames with the TX_STATUS_AMPDU flag.
312 *
313 * 4. mac80211 is not particularly interested in
314 * feedback either [CTL_REQ_TX_STATUS not set]
311 */ 315 */
312 316
313 dev_kfree_skb_any(skb); 317 dev_kfree_skb_any(skb);
314 return; 318 return;
315 } else { 319 } else {
316 /* 320 /*
317 * Frame has failed, but we want to keep it in 321 * Either the frame transmission has failed or
318 * case it was lost due to a power-state 322 * mac80211 requested tx status.
319 * transition.
320 */ 323 */
321 } 324 }
322 } 325 }
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index c73e8600d218..58ea0e5fabfd 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -827,7 +827,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
827#endif 827#endif
828 return; 828 return;
829drop: 829drop:
830 b43dbg(dev->wl, "RX: Packet dropped\n");
831 dev_kfree_skb_any(skb); 830 dev_kfree_skb_any(skb);
832} 831}
833 832
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index b247a56d5135..001fdf140abb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1755,16 +1755,6 @@ static inline int iwl_check_stuck_queue(struct iwl_priv *priv, int txq)
1755{ 1755{
1756 if (iwl_trans_check_stuck_queue(trans(priv), txq)) { 1756 if (iwl_trans_check_stuck_queue(trans(priv), txq)) {
1757 int ret; 1757 int ret;
1758 if (txq == priv->shrd->cmd_queue) {
1759 /*
1760 * validate command queue still working
1761 * by sending "ECHO" command
1762 */
1763 if (!iwl_cmd_echo_test(priv))
1764 return 0;
1765 else
1766 IWL_DEBUG_HC(priv, "echo testing fail\n");
1767 }
1768 ret = iwl_force_reset(priv, IWL_FW_RESET, false); 1758 ret = iwl_force_reset(priv, IWL_FW_RESET, false);
1769 return (ret == -EAGAIN) ? 0 : 1; 1759 return (ret == -EAGAIN) ? 0 : 1;
1770 } 1760 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index f0c623ade3ff..1800029911ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -446,10 +446,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
446 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 446 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
447 447
448 err = pci_enable_msi(pdev); 448 err = pci_enable_msi(pdev);
449 if (err) { 449 if (err)
450 dev_printk(KERN_ERR, &pdev->dev, "pci_enable_msi failed"); 450 dev_printk(KERN_ERR, &pdev->dev,
451 goto out_iounmap; 451 "pci_enable_msi failed(0X%x)", err);
452 }
453 452
454 /* TODO: Move this away, not needed if not MSI */ 453 /* TODO: Move this away, not needed if not MSI */
455 /* enable rfkill interrupt: hw bug w/a */ 454 /* enable rfkill interrupt: hw bug w/a */
@@ -470,7 +469,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
470 469
471out_disable_msi: 470out_disable_msi:
472 pci_disable_msi(pdev); 471 pci_disable_msi(pdev);
473out_iounmap:
474 pci_iounmap(pdev, pci_bus->hw_base); 472 pci_iounmap(pdev, pci_bus->hw_base);
475out_pci_release_regions: 473out_pci_release_regions:
476 pci_set_drvdata(pdev, NULL); 474 pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 8e8c75c997ee..da3411057afc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -407,6 +407,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
407 struct iwl_queue *q = &txq->q; 407 struct iwl_queue *q = &txq->q;
408 enum dma_data_direction dma_dir; 408 enum dma_data_direction dma_dir;
409 unsigned long flags; 409 unsigned long flags;
410 spinlock_t *lock;
410 411
411 if (!q->n_bd) 412 if (!q->n_bd)
412 return; 413 return;
@@ -414,19 +415,22 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
414 /* In the command queue, all the TBs are mapped as BIDI 415 /* In the command queue, all the TBs are mapped as BIDI
415 * so unmap them as such. 416 * so unmap them as such.
416 */ 417 */
417 if (txq_id == trans->shrd->cmd_queue) 418 if (txq_id == trans->shrd->cmd_queue) {
418 dma_dir = DMA_BIDIRECTIONAL; 419 dma_dir = DMA_BIDIRECTIONAL;
419 else 420 lock = &trans->hcmd_lock;
421 } else {
420 dma_dir = DMA_TO_DEVICE; 422 dma_dir = DMA_TO_DEVICE;
423 lock = &trans->shrd->sta_lock;
424 }
421 425
422 spin_lock_irqsave(&trans->shrd->sta_lock, flags); 426 spin_lock_irqsave(lock, flags);
423 while (q->write_ptr != q->read_ptr) { 427 while (q->write_ptr != q->read_ptr) {
424 /* The read_ptr needs to bound by q->n_window */ 428 /* The read_ptr needs to bound by q->n_window */
425 iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr), 429 iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
426 dma_dir); 430 dma_dir);
427 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 431 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
428 } 432 }
429 spin_unlock_irqrestore(&trans->shrd->sta_lock, flags); 433 spin_unlock_irqrestore(lock, flags);
430} 434}
431 435
432/** 436/**
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index ff6378276ff0..4fcd653bddc4 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -728,15 +728,9 @@ static void lbs_scan_worker(struct work_struct *work)
728 le16_to_cpu(scan_cmd->hdr.size), 728 le16_to_cpu(scan_cmd->hdr.size),
729 lbs_ret_scan, 0); 729 lbs_ret_scan, 0);
730 730
731 if (priv->scan_channel >= priv->scan_req->n_channels) { 731 if (priv->scan_channel >= priv->scan_req->n_channels)
732 /* Mark scan done */ 732 /* Mark scan done */
733 if (priv->internal_scan) 733 lbs_scan_done(priv);
734 kfree(priv->scan_req);
735 else
736 cfg80211_scan_done(priv->scan_req, false);
737
738 priv->scan_req = NULL;
739 }
740 734
741 /* Restart network */ 735 /* Restart network */
742 if (carrier) 736 if (carrier)
@@ -774,6 +768,21 @@ static void _internal_start_scan(struct lbs_private *priv, bool internal,
774 lbs_deb_leave(LBS_DEB_CFG80211); 768 lbs_deb_leave(LBS_DEB_CFG80211);
775} 769}
776 770
771/*
772 * Clean up priv->scan_req. Should be used to handle the allocation details.
773 */
774void lbs_scan_done(struct lbs_private *priv)
775{
776 WARN_ON(!priv->scan_req);
777
778 if (priv->internal_scan)
779 kfree(priv->scan_req);
780 else
781 cfg80211_scan_done(priv->scan_req, false);
782
783 priv->scan_req = NULL;
784}
785
777static int lbs_cfg_scan(struct wiphy *wiphy, 786static int lbs_cfg_scan(struct wiphy *wiphy,
778 struct net_device *dev, 787 struct net_device *dev,
779 struct cfg80211_scan_request *request) 788 struct cfg80211_scan_request *request)
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h
index a02ee151710e..558168ce634d 100644
--- a/drivers/net/wireless/libertas/cfg.h
+++ b/drivers/net/wireless/libertas/cfg.h
@@ -16,6 +16,7 @@ int lbs_reg_notifier(struct wiphy *wiphy,
16void lbs_send_disconnect_notification(struct lbs_private *priv); 16void lbs_send_disconnect_notification(struct lbs_private *priv);
17void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event); 17void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
18 18
19void lbs_scan_done(struct lbs_private *priv);
19void lbs_scan_deinit(struct lbs_private *priv); 20void lbs_scan_deinit(struct lbs_private *priv);
20int lbs_disconnect(struct lbs_private *priv, u16 reason); 21int lbs_disconnect(struct lbs_private *priv, u16 reason);
21 22
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 4ae99a40dbf7..957681dede17 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -255,10 +255,8 @@ static int lbs_eth_stop(struct net_device *dev)
255 255
256 lbs_update_mcast(priv); 256 lbs_update_mcast(priv);
257 cancel_delayed_work_sync(&priv->scan_work); 257 cancel_delayed_work_sync(&priv->scan_work);
258 if (priv->scan_req) { 258 if (priv->scan_req)
259 cfg80211_scan_done(priv->scan_req, false); 259 lbs_scan_done(priv);
260 priv->scan_req = NULL;
261 }
262 260
263 netif_carrier_off(priv->dev); 261 netif_carrier_off(priv->dev);
264 262
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index ef566443f945..e17e2f8001d2 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -2,23 +2,17 @@
2# PINCTRL infrastructure and drivers 2# PINCTRL infrastructure and drivers
3# 3#
4 4
5menuconfig PINCTRL 5config PINCTRL
6 bool "PINCTRL Support" 6 bool
7 depends on EXPERIMENTAL 7 depends on EXPERIMENTAL
8 help
9 This enables the PINCTRL subsystem for controlling pins
10 on chip packages, for example multiplexing pins on primarily
11 PGA and BGA packages for systems on chip.
12
13 If unsure, say N.
14 8
15if PINCTRL 9if PINCTRL
16 10
11menu "Pin controllers"
12 depends on PINCTRL
13
17config PINMUX 14config PINMUX
18 bool "Support pinmux controllers" 15 bool "Support pinmux controllers"
19 help
20 Say Y here if you want the pincontrol subsystem to handle pin
21 multiplexing drivers.
22 16
23config DEBUG_PINCTRL 17config DEBUG_PINCTRL
24 bool "Debug PINCTRL calls" 18 bool "Debug PINCTRL calls"
@@ -30,14 +24,12 @@ config PINMUX_SIRF
30 bool "CSR SiRFprimaII pinmux driver" 24 bool "CSR SiRFprimaII pinmux driver"
31 depends on ARCH_PRIMA2 25 depends on ARCH_PRIMA2
32 select PINMUX 26 select PINMUX
33 help
34 Say Y here to enable the SiRFprimaII pinmux driver
35 27
36config PINMUX_U300 28config PINMUX_U300
37 bool "U300 pinmux driver" 29 bool "U300 pinmux driver"
38 depends on ARCH_U300 30 depends on ARCH_U300
39 select PINMUX 31 select PINMUX
40 help 32
41 Say Y here to enable the U300 pinmux driver 33endmenu
42 34
43endif 35endif
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index f4e3d82379d7..7f43cf86d776 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -83,8 +83,10 @@ config DELL_LAPTOP
83 depends on EXPERIMENTAL 83 depends on EXPERIMENTAL
84 depends on BACKLIGHT_CLASS_DEVICE 84 depends on BACKLIGHT_CLASS_DEVICE
85 depends on RFKILL || RFKILL = n 85 depends on RFKILL || RFKILL = n
86 depends on POWER_SUPPLY
87 depends on SERIO_I8042 86 depends on SERIO_I8042
87 select POWER_SUPPLY
88 select LEDS_CLASS
89 select NEW_LEDS
88 default n 90 default n
89 ---help--- 91 ---help---
90 This driver adds support for rfkill and backlight control to Dell 92 This driver adds support for rfkill and backlight control to Dell
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index bbf3edd85beb..5be4a392a3ae 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -509,15 +509,12 @@ static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev,
509 struct acpi_resource_dma *p) 509 struct acpi_resource_dma *p)
510{ 510{
511 int i; 511 int i;
512 unsigned char map = 0, flags = 0; 512 unsigned char map = 0, flags;
513
514 if (p->channel_count == 0)
515 flags |= IORESOURCE_DISABLED;
516 513
517 for (i = 0; i < p->channel_count; i++) 514 for (i = 0; i < p->channel_count; i++)
518 map |= 1 << p->channels[i]; 515 map |= 1 << p->channels[i];
519 516
520 flags |= dma_flags(dev, p->type, p->bus_master, p->transfer); 517 flags = dma_flags(dev, p->type, p->bus_master, p->transfer);
521 pnp_register_dma_resource(dev, option_flags, map, flags); 518 pnp_register_dma_resource(dev, option_flags, map, flags);
522} 519}
523 520
@@ -527,17 +524,14 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev,
527{ 524{
528 int i; 525 int i;
529 pnp_irq_mask_t map; 526 pnp_irq_mask_t map;
530 unsigned char flags = 0; 527 unsigned char flags;
531
532 if (p->interrupt_count == 0)
533 flags |= IORESOURCE_DISABLED;
534 528
535 bitmap_zero(map.bits, PNP_IRQ_NR); 529 bitmap_zero(map.bits, PNP_IRQ_NR);
536 for (i = 0; i < p->interrupt_count; i++) 530 for (i = 0; i < p->interrupt_count; i++)
537 if (p->interrupts[i]) 531 if (p->interrupts[i])
538 __set_bit(p->interrupts[i], map.bits); 532 __set_bit(p->interrupts[i], map.bits);
539 533
540 flags |= irq_flags(p->triggering, p->polarity, p->sharable); 534 flags = irq_flags(p->triggering, p->polarity, p->sharable);
541 pnp_register_irq_resource(dev, option_flags, &map, flags); 535 pnp_register_irq_resource(dev, option_flags, &map, flags);
542} 536}
543 537
@@ -547,10 +541,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
547{ 541{
548 int i; 542 int i;
549 pnp_irq_mask_t map; 543 pnp_irq_mask_t map;
550 unsigned char flags = 0; 544 unsigned char flags;
551
552 if (p->interrupt_count == 0)
553 flags |= IORESOURCE_DISABLED;
554 545
555 bitmap_zero(map.bits, PNP_IRQ_NR); 546 bitmap_zero(map.bits, PNP_IRQ_NR);
556 for (i = 0; i < p->interrupt_count; i++) { 547 for (i = 0; i < p->interrupt_count; i++) {
@@ -564,7 +555,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
564 } 555 }
565 } 556 }
566 557
567 flags |= irq_flags(p->triggering, p->polarity, p->sharable); 558 flags = irq_flags(p->triggering, p->polarity, p->sharable);
568 pnp_register_irq_resource(dev, option_flags, &map, flags); 559 pnp_register_irq_resource(dev, option_flags, &map, flags);
569} 560}
570 561
@@ -574,11 +565,8 @@ static __init void pnpacpi_parse_port_option(struct pnp_dev *dev,
574{ 565{
575 unsigned char flags = 0; 566 unsigned char flags = 0;
576 567
577 if (io->address_length == 0)
578 flags |= IORESOURCE_DISABLED;
579
580 if (io->io_decode == ACPI_DECODE_16) 568 if (io->io_decode == ACPI_DECODE_16)
581 flags |= IORESOURCE_IO_16BIT_ADDR; 569 flags = IORESOURCE_IO_16BIT_ADDR;
582 pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum, 570 pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum,
583 io->alignment, io->address_length, flags); 571 io->alignment, io->address_length, flags);
584} 572}
@@ -587,13 +575,8 @@ static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev,
587 unsigned int option_flags, 575 unsigned int option_flags,
588 struct acpi_resource_fixed_io *io) 576 struct acpi_resource_fixed_io *io)
589{ 577{
590 unsigned char flags = 0;
591
592 if (io->address_length == 0)
593 flags |= IORESOURCE_DISABLED;
594
595 pnp_register_port_resource(dev, option_flags, io->address, io->address, 578 pnp_register_port_resource(dev, option_flags, io->address, io->address,
596 0, io->address_length, flags | IORESOURCE_IO_FIXED); 579 0, io->address_length, IORESOURCE_IO_FIXED);
597} 580}
598 581
599static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev, 582static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev,
@@ -602,11 +585,8 @@ static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev,
602{ 585{
603 unsigned char flags = 0; 586 unsigned char flags = 0;
604 587
605 if (p->address_length == 0)
606 flags |= IORESOURCE_DISABLED;
607
608 if (p->write_protect == ACPI_READ_WRITE_MEMORY) 588 if (p->write_protect == ACPI_READ_WRITE_MEMORY)
609 flags |= IORESOURCE_MEM_WRITEABLE; 589 flags = IORESOURCE_MEM_WRITEABLE;
610 pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, 590 pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
611 p->alignment, p->address_length, flags); 591 p->alignment, p->address_length, flags);
612} 592}
@@ -617,11 +597,8 @@ static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev,
617{ 597{
618 unsigned char flags = 0; 598 unsigned char flags = 0;
619 599
620 if (p->address_length == 0)
621 flags |= IORESOURCE_DISABLED;
622
623 if (p->write_protect == ACPI_READ_WRITE_MEMORY) 600 if (p->write_protect == ACPI_READ_WRITE_MEMORY)
624 flags |= IORESOURCE_MEM_WRITEABLE; 601 flags = IORESOURCE_MEM_WRITEABLE;
625 pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, 602 pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
626 p->alignment, p->address_length, flags); 603 p->alignment, p->address_length, flags);
627} 604}
@@ -632,11 +609,8 @@ static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev,
632{ 609{
633 unsigned char flags = 0; 610 unsigned char flags = 0;
634 611
635 if (p->address_length == 0)
636 flags |= IORESOURCE_DISABLED;
637
638 if (p->write_protect == ACPI_READ_WRITE_MEMORY) 612 if (p->write_protect == ACPI_READ_WRITE_MEMORY)
639 flags |= IORESOURCE_MEM_WRITEABLE; 613 flags = IORESOURCE_MEM_WRITEABLE;
640 pnp_register_mem_resource(dev, option_flags, p->address, p->address, 614 pnp_register_mem_resource(dev, option_flags, p->address, p->address,
641 0, p->address_length, flags); 615 0, p->address_length, flags);
642} 616}
@@ -656,19 +630,16 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
656 return; 630 return;
657 } 631 }
658 632
659 if (p->address_length == 0)
660 flags |= IORESOURCE_DISABLED;
661
662 if (p->resource_type == ACPI_MEMORY_RANGE) { 633 if (p->resource_type == ACPI_MEMORY_RANGE) {
663 if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) 634 if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
664 flags |= IORESOURCE_MEM_WRITEABLE; 635 flags = IORESOURCE_MEM_WRITEABLE;
665 pnp_register_mem_resource(dev, option_flags, p->minimum, 636 pnp_register_mem_resource(dev, option_flags, p->minimum,
666 p->minimum, 0, p->address_length, 637 p->minimum, 0, p->address_length,
667 flags); 638 flags);
668 } else if (p->resource_type == ACPI_IO_RANGE) 639 } else if (p->resource_type == ACPI_IO_RANGE)
669 pnp_register_port_resource(dev, option_flags, p->minimum, 640 pnp_register_port_resource(dev, option_flags, p->minimum,
670 p->minimum, 0, p->address_length, 641 p->minimum, 0, p->address_length,
671 flags | IORESOURCE_IO_FIXED); 642 IORESOURCE_IO_FIXED);
672} 643}
673 644
674static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev, 645static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
@@ -678,19 +649,16 @@ static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
678 struct acpi_resource_extended_address64 *p = &r->data.ext_address64; 649 struct acpi_resource_extended_address64 *p = &r->data.ext_address64;
679 unsigned char flags = 0; 650 unsigned char flags = 0;
680 651
681 if (p->address_length == 0)
682 flags |= IORESOURCE_DISABLED;
683
684 if (p->resource_type == ACPI_MEMORY_RANGE) { 652 if (p->resource_type == ACPI_MEMORY_RANGE) {
685 if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) 653 if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
686 flags |= IORESOURCE_MEM_WRITEABLE; 654 flags = IORESOURCE_MEM_WRITEABLE;
687 pnp_register_mem_resource(dev, option_flags, p->minimum, 655 pnp_register_mem_resource(dev, option_flags, p->minimum,
688 p->minimum, 0, p->address_length, 656 p->minimum, 0, p->address_length,
689 flags); 657 flags);
690 } else if (p->resource_type == ACPI_IO_RANGE) 658 } else if (p->resource_type == ACPI_IO_RANGE)
691 pnp_register_port_resource(dev, option_flags, p->minimum, 659 pnp_register_port_resource(dev, option_flags, p->minimum,
692 p->minimum, 0, p->address_length, 660 p->minimum, 0, p->address_length,
693 flags | IORESOURCE_IO_FIXED); 661 IORESOURCE_IO_FIXED);
694} 662}
695 663
696struct acpipnp_parse_option_s { 664struct acpipnp_parse_option_s {
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index d9fb729535a1..fb7300837fee 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -952,7 +952,7 @@ static int ps3_vuart_bus_interrupt_get(void)
952 } 952 }
953 953
954 result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler, 954 result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler,
955 IRQF_DISABLED, "vuart", &vuart_bus_priv); 955 0, "vuart", &vuart_bus_priv);
956 956
957 if (result) { 957 if (result) {
958 pr_debug("%s:%d: request_irq failed (%d)\n", 958 pr_debug("%s:%d: request_irq failed (%d)\n",
diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
index cc328dec946b..8c3f5adf1bc6 100644
--- a/drivers/ps3/ps3stor_lib.c
+++ b/drivers/ps3/ps3stor_lib.c
@@ -167,7 +167,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
167 goto fail_close_device; 167 goto fail_close_device;
168 } 168 }
169 169
170 error = request_irq(dev->irq, handler, IRQF_DISABLED, 170 error = request_irq(dev->irq, handler, 0,
171 dev->sbd.core.driver->name, dev); 171 dev->sbd.core.driver->name, dev);
172 if (error) { 172 if (error) {
173 dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n", 173 dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n",
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index d33544802a2e..bb21f443fb70 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -76,12 +76,15 @@ static inline unsigned char vrtc_is_updating(void)
76/* 76/*
77 * rtc_time's year contains the increment over 1900, but vRTC's YEAR 77 * rtc_time's year contains the increment over 1900, but vRTC's YEAR
78 * register can't be programmed to value larger than 0x64, so vRTC 78 * register can't be programmed to value larger than 0x64, so vRTC
79 * driver chose to use 1960 (1970 is UNIX time start point) as the base, 79 * driver chose to use 1972 (1970 is UNIX time start point) as the base,
80 * and does the translation at read/write time. 80 * and does the translation at read/write time.
81 * 81 *
82 * Why not just use 1970 as the offset? it's because using 1960 will 82 * Why not just use 1970 as the offset? it's because using 1972 will
83 * make it consistent in leap year setting for both vrtc and low-level 83 * make it consistent in leap year setting for both vrtc and low-level
84 * physical rtc devices. 84 * physical rtc devices. Then why not use 1960 as the offset? If we use
85 * 1960, for a device's first use, its YEAR register is 0 and the system
86 * year will be parsed as 1960 which is not a valid UNIX time and will
87 * cause many applications to fail mysteriously.
85 */ 88 */
86static int mrst_read_time(struct device *dev, struct rtc_time *time) 89static int mrst_read_time(struct device *dev, struct rtc_time *time)
87{ 90{
@@ -99,10 +102,10 @@ static int mrst_read_time(struct device *dev, struct rtc_time *time)
99 time->tm_year = vrtc_cmos_read(RTC_YEAR); 102 time->tm_year = vrtc_cmos_read(RTC_YEAR);
100 spin_unlock_irqrestore(&rtc_lock, flags); 103 spin_unlock_irqrestore(&rtc_lock, flags);
101 104
102 /* Adjust for the 1960/1900 */ 105 /* Adjust for the 1972/1900 */
103 time->tm_year += 60; 106 time->tm_year += 72;
104 time->tm_mon--; 107 time->tm_mon--;
105 return RTC_24H; 108 return rtc_valid_tm(time);
106} 109}
107 110
108static int mrst_set_time(struct device *dev, struct rtc_time *time) 111static int mrst_set_time(struct device *dev, struct rtc_time *time)
@@ -119,9 +122,9 @@ static int mrst_set_time(struct device *dev, struct rtc_time *time)
119 min = time->tm_min; 122 min = time->tm_min;
120 sec = time->tm_sec; 123 sec = time->tm_sec;
121 124
122 if (yrs < 70 || yrs > 138) 125 if (yrs < 72 || yrs > 138)
123 return -EINVAL; 126 return -EINVAL;
124 yrs -= 60; 127 yrs -= 72;
125 128
126 spin_lock_irqsave(&rtc_lock, flags); 129 spin_lock_irqsave(&rtc_lock, flags);
127 130
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 24e6cec0ae8d..67e272ab1623 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -7,3 +7,11 @@ obj-$(CONFIG_HAVE_CLK) += clk/
7obj-$(CONFIG_MAPLE) += maple/ 7obj-$(CONFIG_MAPLE) += maple/
8obj-$(CONFIG_SUPERHYWAY) += superhyway/ 8obj-$(CONFIG_SUPERHYWAY) += superhyway/
9obj-$(CONFIG_GENERIC_GPIO) += pfc.o 9obj-$(CONFIG_GENERIC_GPIO) += pfc.o
10
11#
12# For the moment we only use this framework for ARM-based SH/R-Mobile
13# platforms and generic SH. SH-based SH-Mobile platforms are still using
14# an older framework that is pending up-porting, at which point this
15# special casing can go away.
16#
17obj-$(CONFIG_SUPERH)$(CONFIG_ARCH_SHMOBILE) += pm_runtime.o
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index dc8d022c07a1..db257a35e71a 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -25,7 +25,6 @@
25#include <linux/seq_file.h> 25#include <linux/seq_file.h>
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/io.h> 27#include <linux/io.h>
28#include <linux/debugfs.h>
29#include <linux/cpufreq.h> 28#include <linux/cpufreq.h>
30#include <linux/clk.h> 29#include <linux/clk.h>
31#include <linux/sh_clk.h> 30#include <linux/sh_clk.h>
@@ -173,6 +172,26 @@ long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
173 return clk_rate_round_helper(&div_range_round); 172 return clk_rate_round_helper(&div_range_round);
174} 173}
175 174
175static long clk_rate_mult_range_iter(unsigned int pos,
176 struct clk_rate_round_data *rounder)
177{
178 return clk_get_rate(rounder->arg) * pos;
179}
180
181long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
182 unsigned int mult_max, unsigned long rate)
183{
184 struct clk_rate_round_data mult_range_round = {
185 .min = mult_min,
186 .max = mult_max,
187 .func = clk_rate_mult_range_iter,
188 .arg = clk_get_parent(clk),
189 .rate = rate,
190 };
191
192 return clk_rate_round_helper(&mult_range_round);
193}
194
176int clk_rate_table_find(struct clk *clk, 195int clk_rate_table_find(struct clk *clk,
177 struct cpufreq_frequency_table *freq_table, 196 struct cpufreq_frequency_table *freq_table,
178 unsigned long rate) 197 unsigned long rate)
@@ -205,9 +224,6 @@ int clk_reparent(struct clk *child, struct clk *parent)
205 list_add(&child->sibling, &parent->children); 224 list_add(&child->sibling, &parent->children);
206 child->parent = parent; 225 child->parent = parent;
207 226
208 /* now do the debugfs renaming to reattach the child
209 to the proper parent */
210
211 return 0; 227 return 0;
212} 228}
213 229
@@ -665,89 +681,6 @@ static int __init clk_syscore_init(void)
665subsys_initcall(clk_syscore_init); 681subsys_initcall(clk_syscore_init);
666#endif 682#endif
667 683
668/*
669 * debugfs support to trace clock tree hierarchy and attributes
670 */
671static struct dentry *clk_debugfs_root;
672
673static int clk_debugfs_register_one(struct clk *c)
674{
675 int err;
676 struct dentry *d;
677 struct clk *pa = c->parent;
678 char s[255];
679 char *p = s;
680
681 p += sprintf(p, "%p", c);
682 d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
683 if (!d)
684 return -ENOMEM;
685 c->dentry = d;
686
687 d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
688 if (!d) {
689 err = -ENOMEM;
690 goto err_out;
691 }
692 d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
693 if (!d) {
694 err = -ENOMEM;
695 goto err_out;
696 }
697 d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
698 if (!d) {
699 err = -ENOMEM;
700 goto err_out;
701 }
702 return 0;
703
704err_out:
705 debugfs_remove_recursive(c->dentry);
706 return err;
707}
708
709static int clk_debugfs_register(struct clk *c)
710{
711 int err;
712 struct clk *pa = c->parent;
713
714 if (pa && !pa->dentry) {
715 err = clk_debugfs_register(pa);
716 if (err)
717 return err;
718 }
719
720 if (!c->dentry) {
721 err = clk_debugfs_register_one(c);
722 if (err)
723 return err;
724 }
725 return 0;
726}
727
728static int __init clk_debugfs_init(void)
729{
730 struct clk *c;
731 struct dentry *d;
732 int err;
733
734 d = debugfs_create_dir("clock", NULL);
735 if (!d)
736 return -ENOMEM;
737 clk_debugfs_root = d;
738
739 list_for_each_entry(c, &clock_list, node) {
740 err = clk_debugfs_register(c);
741 if (err)
742 goto err_out;
743 }
744 return 0;
745err_out:
746 debugfs_remove_recursive(clk_debugfs_root);
747 return err;
748}
749late_initcall(clk_debugfs_init);
750
751static int __init clk_late_init(void) 684static int __init clk_late_init(void)
752{ 685{
753 unsigned long flags; 686 unsigned long flags;
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/drivers/sh/pm_runtime.c
index bd5c6a3b8c55..afe9282629b9 100644
--- a/arch/arm/mach-shmobile/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -1,7 +1,5 @@
1/* 1/*
2 * arch/arm/mach-shmobile/pm_runtime.c 2 * Runtime PM support code
3 *
4 * Runtime PM support code for SuperH Mobile ARM
5 * 3 *
6 * Copyright (C) 2009-2010 Magnus Damm 4 * Copyright (C) 2009-2010 Magnus Damm
7 * 5 *
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 79665e2e6ec5..16d6a839c7fa 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -907,7 +907,7 @@ static void atmel_spi_cleanup(struct spi_device *spi)
907 907
908/*-------------------------------------------------------------------------*/ 908/*-------------------------------------------------------------------------*/
909 909
910static int __init atmel_spi_probe(struct platform_device *pdev) 910static int __devinit atmel_spi_probe(struct platform_device *pdev)
911{ 911{
912 struct resource *regs; 912 struct resource *regs;
913 int irq; 913 int irq;
@@ -1003,7 +1003,7 @@ out_free:
1003 return ret; 1003 return ret;
1004} 1004}
1005 1005
1006static int __exit atmel_spi_remove(struct platform_device *pdev) 1006static int __devexit atmel_spi_remove(struct platform_device *pdev)
1007{ 1007{
1008 struct spi_master *master = platform_get_drvdata(pdev); 1008 struct spi_master *master = platform_get_drvdata(pdev);
1009 struct atmel_spi *as = spi_master_get_devdata(master); 1009 struct atmel_spi *as = spi_master_get_devdata(master);
@@ -1072,6 +1072,7 @@ static struct platform_driver atmel_spi_driver = {
1072 }, 1072 },
1073 .suspend = atmel_spi_suspend, 1073 .suspend = atmel_spi_suspend,
1074 .resume = atmel_spi_resume, 1074 .resume = atmel_spi_resume,
1075 .probe = atmel_spi_probe,
1075 .remove = __exit_p(atmel_spi_remove), 1076 .remove = __exit_p(atmel_spi_remove),
1076}; 1077};
1077module_platform_driver(atmel_spi_driver); 1078module_platform_driver(atmel_spi_driver);
diff --git a/drivers/staging/spectra/lld_mtd.c b/drivers/staging/spectra/lld_mtd.c
index 2bd34662beb5..a9c309a167c2 100644
--- a/drivers/staging/spectra/lld_mtd.c
+++ b/drivers/staging/spectra/lld_mtd.c
@@ -340,7 +340,7 @@ u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block,
340 struct mtd_oob_ops ops; 340 struct mtd_oob_ops ops;
341 int ret; 341 int ret;
342 342
343 ops.mode = MTD_OOB_AUTO; 343 ops.mode = MTD_OPS_AUTO_OOB;
344 ops.datbuf = read_data; 344 ops.datbuf = read_data;
345 ops.len = DeviceInfo.wPageDataSize; 345 ops.len = DeviceInfo.wPageDataSize;
346 ops.oobbuf = read_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET; 346 ops.oobbuf = read_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
@@ -400,7 +400,7 @@ u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block,
400 struct mtd_oob_ops ops; 400 struct mtd_oob_ops ops;
401 int ret; 401 int ret;
402 402
403 ops.mode = MTD_OOB_AUTO; 403 ops.mode = MTD_OPS_AUTO_OOB;
404 ops.datbuf = write_data; 404 ops.datbuf = write_data;
405 ops.len = DeviceInfo.wPageDataSize; 405 ops.len = DeviceInfo.wPageDataSize;
406 ops.oobbuf = write_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET; 406 ops.oobbuf = write_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
@@ -473,7 +473,7 @@ u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block,
473 struct mtd_oob_ops ops; 473 struct mtd_oob_ops ops;
474 int ret; 474 int ret;
475 475
476 ops.mode = MTD_OOB_AUTO; 476 ops.mode = MTD_OPS_AUTO_OOB;
477 ops.datbuf = NULL; 477 ops.datbuf = NULL;
478 ops.len = 0; 478 ops.len = 0;
479 ops.oobbuf = read_data; 479 ops.oobbuf = read_data;
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 708f8e92771a..dd9a5743fa99 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -678,10 +678,10 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
678 return; 678 return;
679 679
680 if (delay > 1000) 680 if (delay > 1000)
681 schedule_delayed_work(&(tz->poll_queue), 681 queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
682 round_jiffies(msecs_to_jiffies(delay))); 682 round_jiffies(msecs_to_jiffies(delay)));
683 else 683 else
684 schedule_delayed_work(&(tz->poll_queue), 684 queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
685 msecs_to_jiffies(delay)); 685 msecs_to_jiffies(delay));
686} 686}
687 687
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 4cb0d0a3e57b..fc7bbba585ce 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -66,14 +66,16 @@
66static int debug; 66static int debug;
67module_param(debug, int, 0600); 67module_param(debug, int, 0600);
68 68
69#define T1 (HZ/10) 69/* Defaults: these are from the specification */
70#define T2 (HZ/3) 70
71#define N2 3 71#define T1 10 /* 100mS */
72#define T2 34 /* 333mS */
73#define N2 3 /* Retry 3 times */
72 74
73/* Use long timers for testing at low speed with debug on */ 75/* Use long timers for testing at low speed with debug on */
74#ifdef DEBUG_TIMING 76#ifdef DEBUG_TIMING
75#define T1 HZ 77#define T1 100
76#define T2 (2 * HZ) 78#define T2 200
77#endif 79#endif
78 80
79/* 81/*
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 1945c70539c2..aff9d612dff0 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -207,6 +207,25 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
207 }, 207 },
208 208
209 /* 209 /*
210 * Common SH-2(A) SCIF definitions for ports with FIFO data
211 * count registers.
212 */
213 [SCIx_SH2_SCIF_FIFODATA_REGTYPE] = {
214 [SCSMR] = { 0x00, 16 },
215 [SCBRR] = { 0x04, 8 },
216 [SCSCR] = { 0x08, 16 },
217 [SCxTDR] = { 0x0c, 8 },
218 [SCxSR] = { 0x10, 16 },
219 [SCxRDR] = { 0x14, 8 },
220 [SCFCR] = { 0x18, 16 },
221 [SCFDR] = { 0x1c, 16 },
222 [SCTFDR] = sci_reg_invalid,
223 [SCRFDR] = sci_reg_invalid,
224 [SCSPTR] = { 0x20, 16 },
225 [SCLSR] = { 0x24, 16 },
226 },
227
228 /*
210 * Common SH-3 SCIF definitions. 229 * Common SH-3 SCIF definitions.
211 */ 230 */
212 [SCIx_SH3_SCIF_REGTYPE] = { 231 [SCIx_SH3_SCIF_REGTYPE] = {
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 79a31e5b4b68..3d1bf41e8892 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -594,11 +594,11 @@ static struct virtio_config_ops virtio_pci_config_ops = {
594 594
595static void virtio_pci_release_dev(struct device *_d) 595static void virtio_pci_release_dev(struct device *_d)
596{ 596{
597 struct virtio_device *dev = container_of(_d, struct virtio_device, 597 /*
598 dev); 598 * No need for a release method as we allocate/free
599 struct virtio_pci_device *vp_dev = to_vp_device(dev); 599 * all devices together with the pci devices.
600 600 * Provide an empty one to avoid getting a warning from core.
601 kfree(vp_dev); 601 */
602} 602}
603 603
604/* the PCI probing function */ 604/* the PCI probing function */
@@ -686,6 +686,7 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
686 pci_iounmap(pci_dev, vp_dev->ioaddr); 686 pci_iounmap(pci_dev, vp_dev->ioaddr);
687 pci_release_regions(pci_dev); 687 pci_release_regions(pci_dev);
688 pci_disable_device(pci_dev); 688 pci_disable_device(pci_dev);
689 kfree(vp_dev);
689} 690}
690 691
691#ifdef CONFIG_PM 692#ifdef CONFIG_PM
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 5a5d325a3935..634608d2a6d0 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -147,14 +147,12 @@ struct btrfs_inode {
147 * the btrfs file release call will add this inode to the 147 * the btrfs file release call will add this inode to the
148 * ordered operations list so that we make sure to flush out any 148 * ordered operations list so that we make sure to flush out any
149 * new data the application may have written before commit. 149 * new data the application may have written before commit.
150 *
151 * yes, its silly to have a single bitflag, but we might grow more
152 * of these.
153 */ 150 */
154 unsigned ordered_data_close:1; 151 unsigned ordered_data_close:1;
155 unsigned orphan_meta_reserved:1; 152 unsigned orphan_meta_reserved:1;
156 unsigned dummy_inode:1; 153 unsigned dummy_inode:1;
157 unsigned in_defrag:1; 154 unsigned in_defrag:1;
155 unsigned delalloc_meta_reserved:1;
158 156
159 /* 157 /*
160 * always compress this one file 158 * always compress this one file
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 3a1b939c9ae2..5b163572e0ca 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -617,12 +617,14 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
617static int btrfs_delayed_inode_reserve_metadata( 617static int btrfs_delayed_inode_reserve_metadata(
618 struct btrfs_trans_handle *trans, 618 struct btrfs_trans_handle *trans,
619 struct btrfs_root *root, 619 struct btrfs_root *root,
620 struct inode *inode,
620 struct btrfs_delayed_node *node) 621 struct btrfs_delayed_node *node)
621{ 622{
622 struct btrfs_block_rsv *src_rsv; 623 struct btrfs_block_rsv *src_rsv;
623 struct btrfs_block_rsv *dst_rsv; 624 struct btrfs_block_rsv *dst_rsv;
624 u64 num_bytes; 625 u64 num_bytes;
625 int ret; 626 int ret;
627 int release = false;
626 628
627 src_rsv = trans->block_rsv; 629 src_rsv = trans->block_rsv;
628 dst_rsv = &root->fs_info->delayed_block_rsv; 630 dst_rsv = &root->fs_info->delayed_block_rsv;
@@ -652,12 +654,65 @@ static int btrfs_delayed_inode_reserve_metadata(
652 if (!ret) 654 if (!ret)
653 node->bytes_reserved = num_bytes; 655 node->bytes_reserved = num_bytes;
654 return ret; 656 return ret;
657 } else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
658 spin_lock(&BTRFS_I(inode)->lock);
659 if (BTRFS_I(inode)->delalloc_meta_reserved) {
660 BTRFS_I(inode)->delalloc_meta_reserved = 0;
661 spin_unlock(&BTRFS_I(inode)->lock);
662 release = true;
663 goto migrate;
664 }
665 spin_unlock(&BTRFS_I(inode)->lock);
666
667 /* Ok we didn't have space pre-reserved. This shouldn't happen
668 * too often but it can happen if we do delalloc to an existing
669 * inode which gets dirtied because of the time update, and then
670 * isn't touched again until after the transaction commits and
671 * then we try to write out the data. First try to be nice and
672 * reserve something strictly for us. If not be a pain and try
673 * to steal from the delalloc block rsv.
674 */
675 ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
676 if (!ret)
677 goto out;
678
679 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
680 if (!ret)
681 goto out;
682
683 /*
684 * Ok this is a problem, let's just steal from the global rsv
685 * since this really shouldn't happen that often.
686 */
687 WARN_ON(1);
688 ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
689 dst_rsv, num_bytes);
690 goto out;
655 } 691 }
656 692
693migrate:
657 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); 694 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
695
696out:
697 /*
698 * Migrate only takes a reservation, it doesn't touch the size of the
699 * block_rsv. This is to simplify people who don't normally have things
700 * migrated from their block rsv. If they go to release their
701 * reservation, that will decrease the size as well, so if migrate
702 * reduced size we'd end up with a negative size. But for the
703 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
704 * but we could in fact do this reserve/migrate dance several times
705 * between the time we did the original reservation and we'd clean it
706 * up. So to take care of this, release the space for the meta
707 * reservation here. I think it may be time for a documentation page on
708 * how block rsvs. work.
709 */
658 if (!ret) 710 if (!ret)
659 node->bytes_reserved = num_bytes; 711 node->bytes_reserved = num_bytes;
660 712
713 if (release)
714 btrfs_block_rsv_release(root, src_rsv, num_bytes);
715
661 return ret; 716 return ret;
662} 717}
663 718
@@ -1708,7 +1763,8 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1708 goto release_node; 1763 goto release_node;
1709 } 1764 }
1710 1765
1711 ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node); 1766 ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
1767 delayed_node);
1712 if (ret) 1768 if (ret)
1713 goto release_node; 1769 goto release_node;
1714 1770
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 102c176fc29c..62afe5c5694e 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1890,31 +1890,32 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1890 u64 features; 1890 u64 features;
1891 struct btrfs_key location; 1891 struct btrfs_key location;
1892 struct buffer_head *bh; 1892 struct buffer_head *bh;
1893 struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root), 1893 struct btrfs_super_block *disk_super;
1894 GFP_NOFS);
1895 struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
1896 GFP_NOFS);
1897 struct btrfs_root *tree_root = btrfs_sb(sb); 1894 struct btrfs_root *tree_root = btrfs_sb(sb);
1898 struct btrfs_fs_info *fs_info = NULL; 1895 struct btrfs_fs_info *fs_info = tree_root->fs_info;
1899 struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root), 1896 struct btrfs_root *extent_root;
1900 GFP_NOFS); 1897 struct btrfs_root *csum_root;
1901 struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root), 1898 struct btrfs_root *chunk_root;
1902 GFP_NOFS); 1899 struct btrfs_root *dev_root;
1903 struct btrfs_root *log_tree_root; 1900 struct btrfs_root *log_tree_root;
1904
1905 int ret; 1901 int ret;
1906 int err = -EINVAL; 1902 int err = -EINVAL;
1907 int num_backups_tried = 0; 1903 int num_backups_tried = 0;
1908 int backup_index = 0; 1904 int backup_index = 0;
1909 1905
1910 struct btrfs_super_block *disk_super; 1906 extent_root = fs_info->extent_root =
1907 kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
1908 csum_root = fs_info->csum_root =
1909 kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
1910 chunk_root = fs_info->chunk_root =
1911 kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
1912 dev_root = fs_info->dev_root =
1913 kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
1911 1914
1912 if (!extent_root || !tree_root || !tree_root->fs_info || 1915 if (!extent_root || !csum_root || !chunk_root || !dev_root) {
1913 !chunk_root || !dev_root || !csum_root) {
1914 err = -ENOMEM; 1916 err = -ENOMEM;
1915 goto fail; 1917 goto fail;
1916 } 1918 }
1917 fs_info = tree_root->fs_info;
1918 1919
1919 ret = init_srcu_struct(&fs_info->subvol_srcu); 1920 ret = init_srcu_struct(&fs_info->subvol_srcu);
1920 if (ret) { 1921 if (ret) {
@@ -1954,12 +1955,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1954 mutex_init(&fs_info->reloc_mutex); 1955 mutex_init(&fs_info->reloc_mutex);
1955 1956
1956 init_completion(&fs_info->kobj_unregister); 1957 init_completion(&fs_info->kobj_unregister);
1957 fs_info->tree_root = tree_root;
1958 fs_info->extent_root = extent_root;
1959 fs_info->csum_root = csum_root;
1960 fs_info->chunk_root = chunk_root;
1961 fs_info->dev_root = dev_root;
1962 fs_info->fs_devices = fs_devices;
1963 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); 1958 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1964 INIT_LIST_HEAD(&fs_info->space_info); 1959 INIT_LIST_HEAD(&fs_info->space_info);
1965 btrfs_mapping_init(&fs_info->mapping_tree); 1960 btrfs_mapping_init(&fs_info->mapping_tree);
@@ -2465,21 +2460,20 @@ fail_sb_buffer:
2465 btrfs_stop_workers(&fs_info->caching_workers); 2460 btrfs_stop_workers(&fs_info->caching_workers);
2466fail_alloc: 2461fail_alloc:
2467fail_iput: 2462fail_iput:
2463 btrfs_mapping_tree_free(&fs_info->mapping_tree);
2464
2468 invalidate_inode_pages2(fs_info->btree_inode->i_mapping); 2465 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2469 iput(fs_info->btree_inode); 2466 iput(fs_info->btree_inode);
2470
2471 btrfs_close_devices(fs_info->fs_devices);
2472 btrfs_mapping_tree_free(&fs_info->mapping_tree);
2473fail_bdi: 2467fail_bdi:
2474 bdi_destroy(&fs_info->bdi); 2468 bdi_destroy(&fs_info->bdi);
2475fail_srcu: 2469fail_srcu:
2476 cleanup_srcu_struct(&fs_info->subvol_srcu); 2470 cleanup_srcu_struct(&fs_info->subvol_srcu);
2477fail: 2471fail:
2472 btrfs_close_devices(fs_info->fs_devices);
2478 free_fs_info(fs_info); 2473 free_fs_info(fs_info);
2479 return ERR_PTR(err); 2474 return ERR_PTR(err);
2480 2475
2481recovery_tree_root: 2476recovery_tree_root:
2482
2483 if (!btrfs_test_opt(tree_root, RECOVERY)) 2477 if (!btrfs_test_opt(tree_root, RECOVERY))
2484 goto fail_tree_roots; 2478 goto fail_tree_roots;
2485 2479
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9879bd474632..b232150b5b6b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3797,16 +3797,16 @@ void btrfs_free_block_rsv(struct btrfs_root *root,
3797 kfree(rsv); 3797 kfree(rsv);
3798} 3798}
3799 3799
3800int btrfs_block_rsv_add(struct btrfs_root *root, 3800static inline int __block_rsv_add(struct btrfs_root *root,
3801 struct btrfs_block_rsv *block_rsv, 3801 struct btrfs_block_rsv *block_rsv,
3802 u64 num_bytes) 3802 u64 num_bytes, int flush)
3803{ 3803{
3804 int ret; 3804 int ret;
3805 3805
3806 if (num_bytes == 0) 3806 if (num_bytes == 0)
3807 return 0; 3807 return 0;
3808 3808
3809 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1); 3809 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
3810 if (!ret) { 3810 if (!ret) {
3811 block_rsv_add_bytes(block_rsv, num_bytes, 1); 3811 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3812 return 0; 3812 return 0;
@@ -3815,22 +3815,18 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
3815 return ret; 3815 return ret;
3816} 3816}
3817 3817
3818int btrfs_block_rsv_add(struct btrfs_root *root,
3819 struct btrfs_block_rsv *block_rsv,
3820 u64 num_bytes)
3821{
3822 return __block_rsv_add(root, block_rsv, num_bytes, 1);
3823}
3824
3818int btrfs_block_rsv_add_noflush(struct btrfs_root *root, 3825int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
3819 struct btrfs_block_rsv *block_rsv, 3826 struct btrfs_block_rsv *block_rsv,
3820 u64 num_bytes) 3827 u64 num_bytes)
3821{ 3828{
3822 int ret; 3829 return __block_rsv_add(root, block_rsv, num_bytes, 0);
3823
3824 if (num_bytes == 0)
3825 return 0;
3826
3827 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 0);
3828 if (!ret) {
3829 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3830 return 0;
3831 }
3832
3833 return ret;
3834} 3830}
3835 3831
3836int btrfs_block_rsv_check(struct btrfs_root *root, 3832int btrfs_block_rsv_check(struct btrfs_root *root,
@@ -4064,23 +4060,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4064 */ 4060 */
4065static unsigned drop_outstanding_extent(struct inode *inode) 4061static unsigned drop_outstanding_extent(struct inode *inode)
4066{ 4062{
4063 unsigned drop_inode_space = 0;
4067 unsigned dropped_extents = 0; 4064 unsigned dropped_extents = 0;
4068 4065
4069 BUG_ON(!BTRFS_I(inode)->outstanding_extents); 4066 BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4070 BTRFS_I(inode)->outstanding_extents--; 4067 BTRFS_I(inode)->outstanding_extents--;
4071 4068
4069 if (BTRFS_I(inode)->outstanding_extents == 0 &&
4070 BTRFS_I(inode)->delalloc_meta_reserved) {
4071 drop_inode_space = 1;
4072 BTRFS_I(inode)->delalloc_meta_reserved = 0;
4073 }
4074
4072 /* 4075 /*
4073 * If we have more or the same amount of outsanding extents than we have 4076 * If we have more or the same amount of outsanding extents than we have
4074 * reserved then we need to leave the reserved extents count alone. 4077 * reserved then we need to leave the reserved extents count alone.
4075 */ 4078 */
4076 if (BTRFS_I(inode)->outstanding_extents >= 4079 if (BTRFS_I(inode)->outstanding_extents >=
4077 BTRFS_I(inode)->reserved_extents) 4080 BTRFS_I(inode)->reserved_extents)
4078 return 0; 4081 return drop_inode_space;
4079 4082
4080 dropped_extents = BTRFS_I(inode)->reserved_extents - 4083 dropped_extents = BTRFS_I(inode)->reserved_extents -
4081 BTRFS_I(inode)->outstanding_extents; 4084 BTRFS_I(inode)->outstanding_extents;
4082 BTRFS_I(inode)->reserved_extents -= dropped_extents; 4085 BTRFS_I(inode)->reserved_extents -= dropped_extents;
4083 return dropped_extents; 4086 return dropped_extents + drop_inode_space;
4084} 4087}
4085 4088
4086/** 4089/**
@@ -4166,9 +4169,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4166 nr_extents = BTRFS_I(inode)->outstanding_extents - 4169 nr_extents = BTRFS_I(inode)->outstanding_extents -
4167 BTRFS_I(inode)->reserved_extents; 4170 BTRFS_I(inode)->reserved_extents;
4168 BTRFS_I(inode)->reserved_extents += nr_extents; 4171 BTRFS_I(inode)->reserved_extents += nr_extents;
4172 }
4169 4173
4170 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents); 4174 /*
4175 * Add an item to reserve for updating the inode when we complete the
4176 * delalloc io.
4177 */
4178 if (!BTRFS_I(inode)->delalloc_meta_reserved) {
4179 nr_extents++;
4180 BTRFS_I(inode)->delalloc_meta_reserved = 1;
4171 } 4181 }
4182
4183 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4172 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1); 4184 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4173 spin_unlock(&BTRFS_I(inode)->lock); 4185 spin_unlock(&BTRFS_I(inode)->lock);
4174 4186
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 7a15fcfb3e1f..181760f9d2ab 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -537,6 +537,13 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
537 struct btrfs_free_space *entry, u8 *type) 537 struct btrfs_free_space *entry, u8 *type)
538{ 538{
539 struct btrfs_free_space_entry *e; 539 struct btrfs_free_space_entry *e;
540 int ret;
541
542 if (!io_ctl->cur) {
543 ret = io_ctl_check_crc(io_ctl, io_ctl->index);
544 if (ret)
545 return ret;
546 }
540 547
541 e = io_ctl->cur; 548 e = io_ctl->cur;
542 entry->offset = le64_to_cpu(e->offset); 549 entry->offset = le64_to_cpu(e->offset);
@@ -550,10 +557,7 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
550 557
551 io_ctl_unmap_page(io_ctl); 558 io_ctl_unmap_page(io_ctl);
552 559
553 if (io_ctl->index >= io_ctl->num_pages) 560 return 0;
554 return 0;
555
556 return io_ctl_check_crc(io_ctl, io_ctl->index);
557} 561}
558 562
559static int io_ctl_read_bitmap(struct io_ctl *io_ctl, 563static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
@@ -561,9 +565,6 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
561{ 565{
562 int ret; 566 int ret;
563 567
564 if (io_ctl->cur && io_ctl->cur != io_ctl->orig)
565 io_ctl_unmap_page(io_ctl);
566
567 ret = io_ctl_check_crc(io_ctl, io_ctl->index); 568 ret = io_ctl_check_crc(io_ctl, io_ctl->index);
568 if (ret) 569 if (ret)
569 return ret; 570 return ret;
@@ -699,6 +700,8 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
699 num_entries--; 700 num_entries--;
700 } 701 }
701 702
703 io_ctl_unmap_page(&io_ctl);
704
702 /* 705 /*
703 * We add the bitmaps at the end of the entries in order that 706 * We add the bitmaps at the end of the entries in order that
704 * the bitmap entries are added to the cache. 707 * the bitmap entries are added to the cache.
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 53dcbdf446cd..f8962a957d65 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -398,6 +398,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
398 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; 398 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
399 struct btrfs_path *path; 399 struct btrfs_path *path;
400 struct inode *inode; 400 struct inode *inode;
401 struct btrfs_block_rsv *rsv;
402 u64 num_bytes;
401 u64 alloc_hint = 0; 403 u64 alloc_hint = 0;
402 int ret; 404 int ret;
403 int prealloc; 405 int prealloc;
@@ -421,11 +423,26 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
421 if (!path) 423 if (!path)
422 return -ENOMEM; 424 return -ENOMEM;
423 425
426 rsv = trans->block_rsv;
427 trans->block_rsv = &root->fs_info->trans_block_rsv;
428
429 num_bytes = trans->bytes_reserved;
430 /*
431 * 1 item for inode item insertion if need
432 * 3 items for inode item update (in the worst case)
433 * 1 item for free space object
434 * 3 items for pre-allocation
435 */
436 trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);
437 ret = btrfs_block_rsv_add_noflush(root, trans->block_rsv,
438 trans->bytes_reserved);
439 if (ret)
440 goto out;
424again: 441again:
425 inode = lookup_free_ino_inode(root, path); 442 inode = lookup_free_ino_inode(root, path);
426 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 443 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
427 ret = PTR_ERR(inode); 444 ret = PTR_ERR(inode);
428 goto out; 445 goto out_release;
429 } 446 }
430 447
431 if (IS_ERR(inode)) { 448 if (IS_ERR(inode)) {
@@ -434,7 +451,7 @@ again:
434 451
435 ret = create_free_ino_inode(root, trans, path); 452 ret = create_free_ino_inode(root, trans, path);
436 if (ret) 453 if (ret)
437 goto out; 454 goto out_release;
438 goto again; 455 goto again;
439 } 456 }
440 457
@@ -477,11 +494,14 @@ again:
477 } 494 }
478 btrfs_free_reserved_data_space(inode, prealloc); 495 btrfs_free_reserved_data_space(inode, prealloc);
479 496
497 ret = btrfs_write_out_ino_cache(root, trans, path);
480out_put: 498out_put:
481 iput(inode); 499 iput(inode);
500out_release:
501 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
482out: 502out:
483 if (ret == 0) 503 trans->block_rsv = rsv;
484 ret = btrfs_write_out_ino_cache(root, trans, path); 504 trans->bytes_reserved = num_bytes;
485 505
486 btrfs_free_path(path); 506 btrfs_free_path(path);
487 return ret; 507 return ret;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 966ddcc4c63d..116ab67a06df 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -93,6 +93,8 @@ static noinline int cow_file_range(struct inode *inode,
93 struct page *locked_page, 93 struct page *locked_page,
94 u64 start, u64 end, int *page_started, 94 u64 start, u64 end, int *page_started,
95 unsigned long *nr_written, int unlock); 95 unsigned long *nr_written, int unlock);
96static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
97 struct btrfs_root *root, struct inode *inode);
96 98
97static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 99static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
98 struct inode *inode, struct inode *dir, 100 struct inode *inode, struct inode *dir,
@@ -1741,7 +1743,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1741 trans = btrfs_join_transaction(root); 1743 trans = btrfs_join_transaction(root);
1742 BUG_ON(IS_ERR(trans)); 1744 BUG_ON(IS_ERR(trans));
1743 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1745 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1744 ret = btrfs_update_inode(trans, root, inode); 1746 ret = btrfs_update_inode_fallback(trans, root, inode);
1745 BUG_ON(ret); 1747 BUG_ON(ret);
1746 } 1748 }
1747 goto out; 1749 goto out;
@@ -1791,7 +1793,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1791 1793
1792 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1794 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1793 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 1795 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1794 ret = btrfs_update_inode(trans, root, inode); 1796 ret = btrfs_update_inode_fallback(trans, root, inode);
1795 BUG_ON(ret); 1797 BUG_ON(ret);
1796 } 1798 }
1797 ret = 0; 1799 ret = 0;
@@ -2199,6 +2201,9 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2199 if (ret) 2201 if (ret)
2200 goto out; 2202 goto out;
2201 } 2203 }
2204 /* release the path since we're done with it */
2205 btrfs_release_path(path);
2206
2202 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; 2207 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2203 2208
2204 if (root->orphan_block_rsv) 2209 if (root->orphan_block_rsv)
@@ -2426,7 +2431,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
2426/* 2431/*
2427 * copy everything in the in-memory inode into the btree. 2432 * copy everything in the in-memory inode into the btree.
2428 */ 2433 */
2429noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 2434static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
2430 struct btrfs_root *root, struct inode *inode) 2435 struct btrfs_root *root, struct inode *inode)
2431{ 2436{
2432 struct btrfs_inode_item *inode_item; 2437 struct btrfs_inode_item *inode_item;
@@ -2434,21 +2439,6 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2434 struct extent_buffer *leaf; 2439 struct extent_buffer *leaf;
2435 int ret; 2440 int ret;
2436 2441
2437 /*
2438 * If the inode is a free space inode, we can deadlock during commit
2439 * if we put it into the delayed code.
2440 *
2441 * The data relocation inode should also be directly updated
2442 * without delay
2443 */
2444 if (!btrfs_is_free_space_inode(root, inode)
2445 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
2446 ret = btrfs_delayed_update_inode(trans, root, inode);
2447 if (!ret)
2448 btrfs_set_inode_last_trans(trans, inode);
2449 return ret;
2450 }
2451
2452 path = btrfs_alloc_path(); 2442 path = btrfs_alloc_path();
2453 if (!path) 2443 if (!path)
2454 return -ENOMEM; 2444 return -ENOMEM;
@@ -2477,6 +2467,43 @@ failed:
2477} 2467}
2478 2468
2479/* 2469/*
2470 * copy everything in the in-memory inode into the btree.
2471 */
2472noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2473 struct btrfs_root *root, struct inode *inode)
2474{
2475 int ret;
2476
2477 /*
2478 * If the inode is a free space inode, we can deadlock during commit
2479 * if we put it into the delayed code.
2480 *
2481 * The data relocation inode should also be directly updated
2482 * without delay
2483 */
2484 if (!btrfs_is_free_space_inode(root, inode)
2485 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
2486 ret = btrfs_delayed_update_inode(trans, root, inode);
2487 if (!ret)
2488 btrfs_set_inode_last_trans(trans, inode);
2489 return ret;
2490 }
2491
2492 return btrfs_update_inode_item(trans, root, inode);
2493}
2494
2495static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
2496 struct btrfs_root *root, struct inode *inode)
2497{
2498 int ret;
2499
2500 ret = btrfs_update_inode(trans, root, inode);
2501 if (ret == -ENOSPC)
2502 return btrfs_update_inode_item(trans, root, inode);
2503 return ret;
2504}
2505
2506/*
2480 * unlink helper that gets used here in inode.c and in the tree logging 2507 * unlink helper that gets used here in inode.c and in the tree logging
2481 * recovery code. It remove a link in a directory with a given name, and 2508 * recovery code. It remove a link in a directory with a given name, and
2482 * also drops the back refs in the inode to the directory 2509 * also drops the back refs in the inode to the directory
@@ -5632,7 +5659,7 @@ again:
5632 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { 5659 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
5633 ret = btrfs_ordered_update_i_size(inode, 0, ordered); 5660 ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5634 if (!ret) 5661 if (!ret)
5635 err = btrfs_update_inode(trans, root, inode); 5662 err = btrfs_update_inode_fallback(trans, root, inode);
5636 goto out; 5663 goto out;
5637 } 5664 }
5638 5665
@@ -5670,7 +5697,7 @@ again:
5670 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); 5697 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
5671 ret = btrfs_ordered_update_i_size(inode, 0, ordered); 5698 ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5672 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) 5699 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
5673 btrfs_update_inode(trans, root, inode); 5700 btrfs_update_inode_fallback(trans, root, inode);
5674 ret = 0; 5701 ret = 0;
5675out_unlock: 5702out_unlock:
5676 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, 5703 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
@@ -6529,14 +6556,16 @@ end_trans:
6529 ret = btrfs_orphan_del(NULL, inode); 6556 ret = btrfs_orphan_del(NULL, inode);
6530 } 6557 }
6531 6558
6532 trans->block_rsv = &root->fs_info->trans_block_rsv; 6559 if (trans) {
6533 ret = btrfs_update_inode(trans, root, inode); 6560 trans->block_rsv = &root->fs_info->trans_block_rsv;
6534 if (ret && !err) 6561 ret = btrfs_update_inode(trans, root, inode);
6535 err = ret; 6562 if (ret && !err)
6563 err = ret;
6536 6564
6537 nr = trans->blocks_used; 6565 nr = trans->blocks_used;
6538 ret = btrfs_end_transaction_throttle(trans, root); 6566 ret = btrfs_end_transaction_throttle(trans, root);
6539 btrfs_btree_balance_dirty(root, nr); 6567 btrfs_btree_balance_dirty(root, nr);
6568 }
6540 6569
6541out: 6570out:
6542 btrfs_free_block_rsv(root, rsv); 6571 btrfs_free_block_rsv(root, rsv);
@@ -6605,6 +6634,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
6605 ei->orphan_meta_reserved = 0; 6634 ei->orphan_meta_reserved = 0;
6606 ei->dummy_inode = 0; 6635 ei->dummy_inode = 0;
6607 ei->in_defrag = 0; 6636 ei->in_defrag = 0;
6637 ei->delalloc_meta_reserved = 0;
6608 ei->force_compress = BTRFS_COMPRESS_NONE; 6638 ei->force_compress = BTRFS_COMPRESS_NONE;
6609 6639
6610 ei->delayed_node = NULL; 6640 ei->delayed_node = NULL;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 24d654ce7a06..dff29d5e151a 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1174,6 +1174,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
1174 list_add_tail(&new_edge->list[UPPER], 1174 list_add_tail(&new_edge->list[UPPER],
1175 &new_node->lower); 1175 &new_node->lower);
1176 } 1176 }
1177 } else {
1178 list_add_tail(&new_node->lower, &cache->leaves);
1177 } 1179 }
1178 1180
1179 rb_node = tree_insert(&cache->rb_root, new_node->bytenr, 1181 rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index ed11d3866afd..f4190f22edfb 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -944,50 +944,18 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
944static int scrub_submit(struct scrub_dev *sdev) 944static int scrub_submit(struct scrub_dev *sdev)
945{ 945{
946 struct scrub_bio *sbio; 946 struct scrub_bio *sbio;
947 struct bio *bio;
948 int i;
949 947
950 if (sdev->curr == -1) 948 if (sdev->curr == -1)
951 return 0; 949 return 0;
952 950
953 sbio = sdev->bios[sdev->curr]; 951 sbio = sdev->bios[sdev->curr];
954
955 bio = bio_alloc(GFP_NOFS, sbio->count);
956 if (!bio)
957 goto nomem;
958
959 bio->bi_private = sbio;
960 bio->bi_end_io = scrub_bio_end_io;
961 bio->bi_bdev = sdev->dev->bdev;
962 bio->bi_sector = sbio->physical >> 9;
963
964 for (i = 0; i < sbio->count; ++i) {
965 struct page *page;
966 int ret;
967
968 page = alloc_page(GFP_NOFS);
969 if (!page)
970 goto nomem;
971
972 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
973 if (!ret) {
974 __free_page(page);
975 goto nomem;
976 }
977 }
978
979 sbio->err = 0; 952 sbio->err = 0;
980 sdev->curr = -1; 953 sdev->curr = -1;
981 atomic_inc(&sdev->in_flight); 954 atomic_inc(&sdev->in_flight);
982 955
983 submit_bio(READ, bio); 956 submit_bio(READ, sbio->bio);
984 957
985 return 0; 958 return 0;
986
987nomem:
988 scrub_free_bio(bio);
989
990 return -ENOMEM;
991} 959}
992 960
993static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, 961static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
@@ -995,6 +963,8 @@ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
995 u8 *csum, int force) 963 u8 *csum, int force)
996{ 964{
997 struct scrub_bio *sbio; 965 struct scrub_bio *sbio;
966 struct page *page;
967 int ret;
998 968
999again: 969again:
1000 /* 970 /*
@@ -1015,12 +985,22 @@ again:
1015 } 985 }
1016 sbio = sdev->bios[sdev->curr]; 986 sbio = sdev->bios[sdev->curr];
1017 if (sbio->count == 0) { 987 if (sbio->count == 0) {
988 struct bio *bio;
989
1018 sbio->physical = physical; 990 sbio->physical = physical;
1019 sbio->logical = logical; 991 sbio->logical = logical;
992 bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
993 if (!bio)
994 return -ENOMEM;
995
996 bio->bi_private = sbio;
997 bio->bi_end_io = scrub_bio_end_io;
998 bio->bi_bdev = sdev->dev->bdev;
999 bio->bi_sector = sbio->physical >> 9;
1000 sbio->err = 0;
1001 sbio->bio = bio;
1020 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical || 1002 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
1021 sbio->logical + sbio->count * PAGE_SIZE != logical) { 1003 sbio->logical + sbio->count * PAGE_SIZE != logical) {
1022 int ret;
1023
1024 ret = scrub_submit(sdev); 1004 ret = scrub_submit(sdev);
1025 if (ret) 1005 if (ret)
1026 return ret; 1006 return ret;
@@ -1030,6 +1010,20 @@ again:
1030 sbio->spag[sbio->count].generation = gen; 1010 sbio->spag[sbio->count].generation = gen;
1031 sbio->spag[sbio->count].have_csum = 0; 1011 sbio->spag[sbio->count].have_csum = 0;
1032 sbio->spag[sbio->count].mirror_num = mirror_num; 1012 sbio->spag[sbio->count].mirror_num = mirror_num;
1013
1014 page = alloc_page(GFP_NOFS);
1015 if (!page)
1016 return -ENOMEM;
1017
1018 ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0);
1019 if (!ret) {
1020 __free_page(page);
1021 ret = scrub_submit(sdev);
1022 if (ret)
1023 return ret;
1024 goto again;
1025 }
1026
1033 if (csum) { 1027 if (csum) {
1034 sbio->spag[sbio->count].have_csum = 1; 1028 sbio->spag[sbio->count].have_csum = 1;
1035 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); 1029 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 57080dffdfc6..8bd9d6d0e07a 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -197,7 +197,7 @@ static match_table_t tokens = {
197 {Opt_subvolrootid, "subvolrootid=%d"}, 197 {Opt_subvolrootid, "subvolrootid=%d"},
198 {Opt_defrag, "autodefrag"}, 198 {Opt_defrag, "autodefrag"},
199 {Opt_inode_cache, "inode_cache"}, 199 {Opt_inode_cache, "inode_cache"},
200 {Opt_no_space_cache, "no_space_cache"}, 200 {Opt_no_space_cache, "nospace_cache"},
201 {Opt_recovery, "recovery"}, 201 {Opt_recovery, "recovery"},
202 {Opt_err, NULL}, 202 {Opt_err, NULL},
203}; 203};
@@ -448,6 +448,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
448 token = match_token(p, tokens, args); 448 token = match_token(p, tokens, args);
449 switch (token) { 449 switch (token) {
450 case Opt_subvol: 450 case Opt_subvol:
451 kfree(*subvol_name);
451 *subvol_name = match_strdup(&args[0]); 452 *subvol_name = match_strdup(&args[0]);
452 break; 453 break;
453 case Opt_subvolid: 454 case Opt_subvolid:
@@ -710,7 +711,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
710 if (btrfs_test_opt(root, SPACE_CACHE)) 711 if (btrfs_test_opt(root, SPACE_CACHE))
711 seq_puts(seq, ",space_cache"); 712 seq_puts(seq, ",space_cache");
712 else 713 else
713 seq_puts(seq, ",no_space_cache"); 714 seq_puts(seq, ",nospace_cache");
714 if (btrfs_test_opt(root, CLEAR_CACHE)) 715 if (btrfs_test_opt(root, CLEAR_CACHE))
715 seq_puts(seq, ",clear_cache"); 716 seq_puts(seq, ",clear_cache");
716 if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED)) 717 if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
@@ -890,7 +891,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
890 struct super_block *s; 891 struct super_block *s;
891 struct dentry *root; 892 struct dentry *root;
892 struct btrfs_fs_devices *fs_devices = NULL; 893 struct btrfs_fs_devices *fs_devices = NULL;
893 struct btrfs_root *tree_root = NULL;
894 struct btrfs_fs_info *fs_info = NULL; 894 struct btrfs_fs_info *fs_info = NULL;
895 fmode_t mode = FMODE_READ; 895 fmode_t mode = FMODE_READ;
896 char *subvol_name = NULL; 896 char *subvol_name = NULL;
@@ -904,8 +904,10 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
904 error = btrfs_parse_early_options(data, mode, fs_type, 904 error = btrfs_parse_early_options(data, mode, fs_type,
905 &subvol_name, &subvol_objectid, 905 &subvol_name, &subvol_objectid,
906 &subvol_rootid, &fs_devices); 906 &subvol_rootid, &fs_devices);
907 if (error) 907 if (error) {
908 kfree(subvol_name);
908 return ERR_PTR(error); 909 return ERR_PTR(error);
910 }
909 911
910 if (subvol_name) { 912 if (subvol_name) {
911 root = mount_subvol(subvol_name, flags, device_name, data); 913 root = mount_subvol(subvol_name, flags, device_name, data);
@@ -917,15 +919,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
917 if (error) 919 if (error)
918 return ERR_PTR(error); 920 return ERR_PTR(error);
919 921
920 error = btrfs_open_devices(fs_devices, mode, fs_type);
921 if (error)
922 return ERR_PTR(error);
923
924 if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
925 error = -EACCES;
926 goto error_close_devices;
927 }
928
929 /* 922 /*
930 * Setup a dummy root and fs_info for test/set super. This is because 923 * Setup a dummy root and fs_info for test/set super. This is because
931 * we don't actually fill this stuff out until open_ctree, but we need 924 * we don't actually fill this stuff out until open_ctree, but we need
@@ -933,24 +926,36 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
933 * then open_ctree will properly initialize everything later. 926 * then open_ctree will properly initialize everything later.
934 */ 927 */
935 fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS); 928 fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS);
936 tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); 929 if (!fs_info)
937 if (!fs_info || !tree_root) { 930 return ERR_PTR(-ENOMEM);
931
932 fs_info->tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
933 if (!fs_info->tree_root) {
938 error = -ENOMEM; 934 error = -ENOMEM;
939 goto error_close_devices; 935 goto error_fs_info;
940 } 936 }
941 fs_info->tree_root = tree_root; 937 fs_info->tree_root->fs_info = fs_info;
942 fs_info->fs_devices = fs_devices; 938 fs_info->fs_devices = fs_devices;
943 tree_root->fs_info = fs_info;
944 939
945 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS); 940 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
946 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS); 941 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
947 if (!fs_info->super_copy || !fs_info->super_for_commit) { 942 if (!fs_info->super_copy || !fs_info->super_for_commit) {
948 error = -ENOMEM; 943 error = -ENOMEM;
944 goto error_fs_info;
945 }
946
947 error = btrfs_open_devices(fs_devices, mode, fs_type);
948 if (error)
949 goto error_fs_info;
950
951 if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
952 error = -EACCES;
949 goto error_close_devices; 953 goto error_close_devices;
950 } 954 }
951 955
952 bdev = fs_devices->latest_bdev; 956 bdev = fs_devices->latest_bdev;
953 s = sget(fs_type, btrfs_test_super, btrfs_set_super, tree_root); 957 s = sget(fs_type, btrfs_test_super, btrfs_set_super,
958 fs_info->tree_root);
954 if (IS_ERR(s)) { 959 if (IS_ERR(s)) {
955 error = PTR_ERR(s); 960 error = PTR_ERR(s);
956 goto error_close_devices; 961 goto error_close_devices;
@@ -959,12 +964,12 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
959 if (s->s_root) { 964 if (s->s_root) {
960 if ((flags ^ s->s_flags) & MS_RDONLY) { 965 if ((flags ^ s->s_flags) & MS_RDONLY) {
961 deactivate_locked_super(s); 966 deactivate_locked_super(s);
962 return ERR_PTR(-EBUSY); 967 error = -EBUSY;
968 goto error_close_devices;
963 } 969 }
964 970
965 btrfs_close_devices(fs_devices); 971 btrfs_close_devices(fs_devices);
966 free_fs_info(fs_info); 972 free_fs_info(fs_info);
967 kfree(tree_root);
968 } else { 973 } else {
969 char b[BDEVNAME_SIZE]; 974 char b[BDEVNAME_SIZE];
970 975
@@ -991,8 +996,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
991 996
992error_close_devices: 997error_close_devices:
993 btrfs_close_devices(fs_devices); 998 btrfs_close_devices(fs_devices);
999error_fs_info:
994 free_fs_info(fs_info); 1000 free_fs_info(fs_info);
995 kfree(tree_root);
996 return ERR_PTR(error); 1001 return ERR_PTR(error);
997} 1002}
998 1003
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 960835eaf4da..6a0574e923bc 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -882,8 +882,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
882 btrfs_reloc_pre_snapshot(trans, pending, &to_reserve); 882 btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
883 883
884 if (to_reserve > 0) { 884 if (to_reserve > 0) {
885 ret = btrfs_block_rsv_add(root, &pending->block_rsv, 885 ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
886 to_reserve); 886 to_reserve);
887 if (ret) { 887 if (ret) {
888 pending->error = ret; 888 pending->error = ret;
889 goto fail; 889 goto fail;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f8e2943101a1..c37433d3cd82 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -999,7 +999,7 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
999 key.objectid = device->devid; 999 key.objectid = device->devid;
1000 key.offset = start; 1000 key.offset = start;
1001 key.type = BTRFS_DEV_EXTENT_KEY; 1001 key.type = BTRFS_DEV_EXTENT_KEY;
1002 1002again:
1003 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1003 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1004 if (ret > 0) { 1004 if (ret > 0) {
1005 ret = btrfs_previous_item(root, path, key.objectid, 1005 ret = btrfs_previous_item(root, path, key.objectid,
@@ -1012,6 +1012,9 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1012 struct btrfs_dev_extent); 1012 struct btrfs_dev_extent);
1013 BUG_ON(found_key.offset > start || found_key.offset + 1013 BUG_ON(found_key.offset > start || found_key.offset +
1014 btrfs_dev_extent_length(leaf, extent) < start); 1014 btrfs_dev_extent_length(leaf, extent) < start);
1015 key = found_key;
1016 btrfs_release_path(path);
1017 goto again;
1015 } else if (ret == 0) { 1018 } else if (ret == 0) {
1016 leaf = path->nodes[0]; 1019 leaf = path->nodes[0];
1017 extent = btrfs_item_ptr(leaf, path->slots[0], 1020 extent = btrfs_item_ptr(leaf, path->slots[0],
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index c1f063cd1b0c..cf0b1539b321 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -645,20 +645,20 @@ int cifs_closedir(struct inode *inode, struct file *file)
645} 645}
646 646
647static struct cifsLockInfo * 647static struct cifsLockInfo *
648cifs_lock_init(__u64 len, __u64 offset, __u8 type, __u16 netfid) 648cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
649{ 649{
650 struct cifsLockInfo *li = 650 struct cifsLockInfo *lock =
651 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL); 651 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
652 if (!li) 652 if (!lock)
653 return li; 653 return lock;
654 li->netfid = netfid; 654 lock->offset = offset;
655 li->offset = offset; 655 lock->length = length;
656 li->length = len; 656 lock->type = type;
657 li->type = type; 657 lock->netfid = netfid;
658 li->pid = current->tgid; 658 lock->pid = current->tgid;
659 INIT_LIST_HEAD(&li->blist); 659 INIT_LIST_HEAD(&lock->blist);
660 init_waitqueue_head(&li->block_q); 660 init_waitqueue_head(&lock->block_q);
661 return li; 661 return lock;
662} 662}
663 663
664static void 664static void
@@ -672,7 +672,7 @@ cifs_del_lock_waiters(struct cifsLockInfo *lock)
672} 672}
673 673
674static bool 674static bool
675cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset, 675__cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
676 __u64 length, __u8 type, __u16 netfid, 676 __u64 length, __u8 type, __u16 netfid,
677 struct cifsLockInfo **conf_lock) 677 struct cifsLockInfo **conf_lock)
678{ 678{
@@ -694,6 +694,14 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
694 return false; 694 return false;
695} 695}
696 696
697static bool
698cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
699 struct cifsLockInfo **conf_lock)
700{
701 return __cifs_find_lock_conflict(cinode, lock->offset, lock->length,
702 lock->type, lock->netfid, conf_lock);
703}
704
697static int 705static int
698cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length, 706cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
699 __u8 type, __u16 netfid, struct file_lock *flock) 707 __u8 type, __u16 netfid, struct file_lock *flock)
@@ -704,8 +712,8 @@ cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
704 712
705 mutex_lock(&cinode->lock_mutex); 713 mutex_lock(&cinode->lock_mutex);
706 714
707 exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid, 715 exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid,
708 &conf_lock); 716 &conf_lock);
709 if (exist) { 717 if (exist) {
710 flock->fl_start = conf_lock->offset; 718 flock->fl_start = conf_lock->offset;
711 flock->fl_end = conf_lock->offset + conf_lock->length - 1; 719 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
@@ -723,40 +731,27 @@ cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
723 return rc; 731 return rc;
724} 732}
725 733
726static int 734static void
727cifs_lock_add(struct cifsInodeInfo *cinode, __u64 len, __u64 offset, 735cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
728 __u8 type, __u16 netfid)
729{ 736{
730 struct cifsLockInfo *li;
731
732 li = cifs_lock_init(len, offset, type, netfid);
733 if (!li)
734 return -ENOMEM;
735
736 mutex_lock(&cinode->lock_mutex); 737 mutex_lock(&cinode->lock_mutex);
737 list_add_tail(&li->llist, &cinode->llist); 738 list_add_tail(&lock->llist, &cinode->llist);
738 mutex_unlock(&cinode->lock_mutex); 739 mutex_unlock(&cinode->lock_mutex);
739 return 0;
740} 740}
741 741
742static int 742static int
743cifs_lock_add_if(struct cifsInodeInfo *cinode, __u64 offset, __u64 length, 743cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
744 __u8 type, __u16 netfid, bool wait) 744 bool wait)
745{ 745{
746 struct cifsLockInfo *lock, *conf_lock; 746 struct cifsLockInfo *conf_lock;
747 bool exist; 747 bool exist;
748 int rc = 0; 748 int rc = 0;
749 749
750 lock = cifs_lock_init(length, offset, type, netfid);
751 if (!lock)
752 return -ENOMEM;
753
754try_again: 750try_again:
755 exist = false; 751 exist = false;
756 mutex_lock(&cinode->lock_mutex); 752 mutex_lock(&cinode->lock_mutex);
757 753
758 exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid, 754 exist = cifs_find_lock_conflict(cinode, lock, &conf_lock);
759 &conf_lock);
760 if (!exist && cinode->can_cache_brlcks) { 755 if (!exist && cinode->can_cache_brlcks) {
761 list_add_tail(&lock->llist, &cinode->llist); 756 list_add_tail(&lock->llist, &cinode->llist);
762 mutex_unlock(&cinode->lock_mutex); 757 mutex_unlock(&cinode->lock_mutex);
@@ -775,13 +770,10 @@ try_again:
775 (lock->blist.next == &lock->blist)); 770 (lock->blist.next == &lock->blist));
776 if (!rc) 771 if (!rc)
777 goto try_again; 772 goto try_again;
778 else { 773 mutex_lock(&cinode->lock_mutex);
779 mutex_lock(&cinode->lock_mutex); 774 list_del_init(&lock->blist);
780 list_del_init(&lock->blist);
781 }
782 } 775 }
783 776
784 kfree(lock);
785 mutex_unlock(&cinode->lock_mutex); 777 mutex_unlock(&cinode->lock_mutex);
786 return rc; 778 return rc;
787} 779}
@@ -933,7 +925,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
933 else 925 else
934 type = CIFS_WRLCK; 926 type = CIFS_WRLCK;
935 927
936 lck = cifs_lock_init(length, flock->fl_start, type, 928 lck = cifs_lock_init(flock->fl_start, length, type,
937 cfile->netfid); 929 cfile->netfid);
938 if (!lck) { 930 if (!lck) {
939 rc = -ENOMEM; 931 rc = -ENOMEM;
@@ -1070,14 +1062,12 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
1070 if (rc != 0) 1062 if (rc != 0)
1071 cERROR(1, "Error unlocking previously locked " 1063 cERROR(1, "Error unlocking previously locked "
1072 "range %d during test of lock", rc); 1064 "range %d during test of lock", rc);
1073 rc = 0; 1065 return 0;
1074 return rc;
1075 } 1066 }
1076 1067
1077 if (type & LOCKING_ANDX_SHARED_LOCK) { 1068 if (type & LOCKING_ANDX_SHARED_LOCK) {
1078 flock->fl_type = F_WRLCK; 1069 flock->fl_type = F_WRLCK;
1079 rc = 0; 1070 return 0;
1080 return rc;
1081 } 1071 }
1082 1072
1083 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length, 1073 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
@@ -1095,8 +1085,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
1095 } else 1085 } else
1096 flock->fl_type = F_WRLCK; 1086 flock->fl_type = F_WRLCK;
1097 1087
1098 rc = 0; 1088 return 0;
1099 return rc;
1100} 1089}
1101 1090
1102static void 1091static void
@@ -1254,20 +1243,26 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
1254 } 1243 }
1255 1244
1256 if (lock) { 1245 if (lock) {
1257 rc = cifs_lock_add_if(cinode, flock->fl_start, length, 1246 struct cifsLockInfo *lock;
1258 type, netfid, wait_flag); 1247
1248 lock = cifs_lock_init(flock->fl_start, length, type, netfid);
1249 if (!lock)
1250 return -ENOMEM;
1251
1252 rc = cifs_lock_add_if(cinode, lock, wait_flag);
1259 if (rc < 0) 1253 if (rc < 0)
1260 return rc; 1254 kfree(lock);
1261 else if (!rc) 1255 if (rc <= 0)
1262 goto out; 1256 goto out;
1263 1257
1264 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length, 1258 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1265 flock->fl_start, 0, 1, type, wait_flag, 0); 1259 flock->fl_start, 0, 1, type, wait_flag, 0);
1266 if (rc == 0) { 1260 if (rc) {
1267 /* For Windows locks we must store them. */ 1261 kfree(lock);
1268 rc = cifs_lock_add(cinode, length, flock->fl_start, 1262 goto out;
1269 type, netfid);
1270 } 1263 }
1264
1265 cifs_lock_add(cinode, lock);
1271 } else if (unlock) 1266 } else if (unlock)
1272 rc = cifs_unlock_range(cfile, flock, xid); 1267 rc = cifs_unlock_range(cfile, flock, xid);
1273 1268
diff --git a/fs/dcache.c b/fs/dcache.c
index 274f13e2f094..a901c6901bce 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -546,9 +546,11 @@ int d_invalidate(struct dentry * dentry)
546 * would make it unreachable from the root, 546 * would make it unreachable from the root,
547 * we might still populate it if it was a 547 * we might still populate it if it was a
548 * working directory or similar). 548 * working directory or similar).
549 * We also need to leave mountpoints alone,
550 * directory or not.
549 */ 551 */
550 if (dentry->d_count > 1) { 552 if (dentry->d_count > 1 && dentry->d_inode) {
551 if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) { 553 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
552 spin_unlock(&dentry->d_lock); 554 spin_unlock(&dentry->d_lock);
553 return -EBUSY; 555 return -EBUSY;
554 } 556 }
diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c
index e673a88b8ae7..b1ce4c7ad3fb 100644
--- a/fs/hfs/trans.c
+++ b/fs/hfs/trans.c
@@ -40,6 +40,8 @@ int hfs_mac2asc(struct super_block *sb, char *out, const struct hfs_name *in)
40 40
41 src = in->name; 41 src = in->name;
42 srclen = in->len; 42 srclen = in->len;
43 if (srclen > HFS_NAMELEN)
44 srclen = HFS_NAMELEN;
43 dst = out; 45 dst = out;
44 dstlen = HFS_MAX_NAMELEN; 46 dstlen = HFS_MAX_NAMELEN;
45 if (nls_io) { 47 if (nls_io) {
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index de4247021d25..5b6c9d1a2fb9 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -53,6 +53,78 @@ static int jffs2_is_best_compression(struct jffs2_compressor *this,
53 return 0; 53 return 0;
54} 54}
55 55
56/*
57 * jffs2_selected_compress:
58 * @compr: Explicit compression type to use (ie, JFFS2_COMPR_ZLIB).
59 * If 0, just take the first available compression mode.
60 * @data_in: Pointer to uncompressed data
61 * @cpage_out: Pointer to returned pointer to buffer for compressed data
62 * @datalen: On entry, holds the amount of data available for compression.
63 * On exit, expected to hold the amount of data actually compressed.
64 * @cdatalen: On entry, holds the amount of space available for compressed
65 * data. On exit, expected to hold the actual size of the compressed
66 * data.
67 *
68 * Returns: the compression type used. Zero is used to show that the data
69 * could not be compressed; probably because we couldn't find the requested
70 * compression mode.
71 */
72static int jffs2_selected_compress(u8 compr, unsigned char *data_in,
73 unsigned char **cpage_out, u32 *datalen, u32 *cdatalen)
74{
75 struct jffs2_compressor *this;
76 int err, ret = JFFS2_COMPR_NONE;
77 uint32_t orig_slen, orig_dlen;
78 char *output_buf;
79
80 output_buf = kmalloc(*cdatalen, GFP_KERNEL);
81 if (!output_buf) {
82 printk(KERN_WARNING "JFFS2: No memory for compressor allocation. Compression failed.\n");
83 return ret;
84 }
85 orig_slen = *datalen;
86 orig_dlen = *cdatalen;
87 spin_lock(&jffs2_compressor_list_lock);
88 list_for_each_entry(this, &jffs2_compressor_list, list) {
89 /* Skip decompress-only and disabled modules */
90 if (!this->compress || this->disabled)
91 continue;
92
93 /* Skip if not the desired compression type */
94 if (compr && (compr != this->compr))
95 continue;
96
97 /*
98 * Either compression type was unspecified, or we found our
99 * compressor; either way, we're good to go.
100 */
101 this->usecount++;
102 spin_unlock(&jffs2_compressor_list_lock);
103
104 *datalen = orig_slen;
105 *cdatalen = orig_dlen;
106 err = this->compress(data_in, output_buf, datalen, cdatalen);
107
108 spin_lock(&jffs2_compressor_list_lock);
109 this->usecount--;
110 if (!err) {
111 /* Success */
112 ret = this->compr;
113 this->stat_compr_blocks++;
114 this->stat_compr_orig_size += *datalen;
115 this->stat_compr_new_size += *cdatalen;
116 break;
117 }
118 }
119 spin_unlock(&jffs2_compressor_list_lock);
120 if (ret == JFFS2_COMPR_NONE)
121 kfree(output_buf);
122 else
123 *cpage_out = output_buf;
124
125 return ret;
126}
127
56/* jffs2_compress: 128/* jffs2_compress:
57 * @data_in: Pointer to uncompressed data 129 * @data_in: Pointer to uncompressed data
58 * @cpage_out: Pointer to returned pointer to buffer for compressed data 130 * @cpage_out: Pointer to returned pointer to buffer for compressed data
@@ -76,47 +148,23 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
76 uint32_t *datalen, uint32_t *cdatalen) 148 uint32_t *datalen, uint32_t *cdatalen)
77{ 149{
78 int ret = JFFS2_COMPR_NONE; 150 int ret = JFFS2_COMPR_NONE;
79 int compr_ret; 151 int mode, compr_ret;
80 struct jffs2_compressor *this, *best=NULL; 152 struct jffs2_compressor *this, *best=NULL;
81 unsigned char *output_buf = NULL, *tmp_buf; 153 unsigned char *output_buf = NULL, *tmp_buf;
82 uint32_t orig_slen, orig_dlen; 154 uint32_t orig_slen, orig_dlen;
83 uint32_t best_slen=0, best_dlen=0; 155 uint32_t best_slen=0, best_dlen=0;
84 156
85 switch (jffs2_compression_mode) { 157 if (c->mount_opts.override_compr)
158 mode = c->mount_opts.compr;
159 else
160 mode = jffs2_compression_mode;
161
162 switch (mode) {
86 case JFFS2_COMPR_MODE_NONE: 163 case JFFS2_COMPR_MODE_NONE:
87 break; 164 break;
88 case JFFS2_COMPR_MODE_PRIORITY: 165 case JFFS2_COMPR_MODE_PRIORITY:
89 output_buf = kmalloc(*cdatalen,GFP_KERNEL); 166 ret = jffs2_selected_compress(0, data_in, cpage_out, datalen,
90 if (!output_buf) { 167 cdatalen);
91 printk(KERN_WARNING "JFFS2: No memory for compressor allocation. Compression failed.\n");
92 goto out;
93 }
94 orig_slen = *datalen;
95 orig_dlen = *cdatalen;
96 spin_lock(&jffs2_compressor_list_lock);
97 list_for_each_entry(this, &jffs2_compressor_list, list) {
98 /* Skip decompress-only backwards-compatibility and disabled modules */
99 if ((!this->compress)||(this->disabled))
100 continue;
101
102 this->usecount++;
103 spin_unlock(&jffs2_compressor_list_lock);
104 *datalen = orig_slen;
105 *cdatalen = orig_dlen;
106 compr_ret = this->compress(data_in, output_buf, datalen, cdatalen);
107 spin_lock(&jffs2_compressor_list_lock);
108 this->usecount--;
109 if (!compr_ret) {
110 ret = this->compr;
111 this->stat_compr_blocks++;
112 this->stat_compr_orig_size += *datalen;
113 this->stat_compr_new_size += *cdatalen;
114 break;
115 }
116 }
117 spin_unlock(&jffs2_compressor_list_lock);
118 if (ret == JFFS2_COMPR_NONE)
119 kfree(output_buf);
120 break; 168 break;
121 case JFFS2_COMPR_MODE_SIZE: 169 case JFFS2_COMPR_MODE_SIZE:
122 case JFFS2_COMPR_MODE_FAVOURLZO: 170 case JFFS2_COMPR_MODE_FAVOURLZO:
@@ -174,22 +222,28 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
174 best->stat_compr_orig_size += best_slen; 222 best->stat_compr_orig_size += best_slen;
175 best->stat_compr_new_size += best_dlen; 223 best->stat_compr_new_size += best_dlen;
176 ret = best->compr; 224 ret = best->compr;
225 *cpage_out = output_buf;
177 } 226 }
178 spin_unlock(&jffs2_compressor_list_lock); 227 spin_unlock(&jffs2_compressor_list_lock);
179 break; 228 break;
229 case JFFS2_COMPR_MODE_FORCELZO:
230 ret = jffs2_selected_compress(JFFS2_COMPR_LZO, data_in,
231 cpage_out, datalen, cdatalen);
232 break;
233 case JFFS2_COMPR_MODE_FORCEZLIB:
234 ret = jffs2_selected_compress(JFFS2_COMPR_ZLIB, data_in,
235 cpage_out, datalen, cdatalen);
236 break;
180 default: 237 default:
181 printk(KERN_ERR "JFFS2: unknown compression mode.\n"); 238 printk(KERN_ERR "JFFS2: unknown compression mode.\n");
182 } 239 }
183 out: 240
184 if (ret == JFFS2_COMPR_NONE) { 241 if (ret == JFFS2_COMPR_NONE) {
185 *cpage_out = data_in; 242 *cpage_out = data_in;
186 *datalen = *cdatalen; 243 *datalen = *cdatalen;
187 none_stat_compr_blocks++; 244 none_stat_compr_blocks++;
188 none_stat_compr_size += *datalen; 245 none_stat_compr_size += *datalen;
189 } 246 }
190 else {
191 *cpage_out = output_buf;
192 }
193 return ret; 247 return ret;
194} 248}
195 249
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h
index 13bb7597ab39..5e91d578f4ed 100644
--- a/fs/jffs2/compr.h
+++ b/fs/jffs2/compr.h
@@ -40,6 +40,8 @@
40#define JFFS2_COMPR_MODE_PRIORITY 1 40#define JFFS2_COMPR_MODE_PRIORITY 1
41#define JFFS2_COMPR_MODE_SIZE 2 41#define JFFS2_COMPR_MODE_SIZE 2
42#define JFFS2_COMPR_MODE_FAVOURLZO 3 42#define JFFS2_COMPR_MODE_FAVOURLZO 3
43#define JFFS2_COMPR_MODE_FORCELZO 4
44#define JFFS2_COMPR_MODE_FORCEZLIB 5
43 45
44#define FAVOUR_LZO_PERCENT 80 46#define FAVOUR_LZO_PERCENT 80
45 47
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 7286e44ac665..4b8afe39a87f 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -379,7 +379,7 @@ void jffs2_dirty_inode(struct inode *inode, int flags)
379 jffs2_do_setattr(inode, &iattr); 379 jffs2_do_setattr(inode, &iattr);
380} 380}
381 381
382int jffs2_remount_fs (struct super_block *sb, int *flags, char *data) 382int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
383{ 383{
384 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); 384 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
385 385
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index 0bc6a6c80a56..55a0c1dceadf 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -29,6 +29,11 @@
29 29
30struct jffs2_inodirty; 30struct jffs2_inodirty;
31 31
32struct jffs2_mount_opts {
33 bool override_compr;
34 unsigned int compr;
35};
36
32/* A struct for the overall file system control. Pointers to 37/* A struct for the overall file system control. Pointers to
33 jffs2_sb_info structs are named `c' in the source code. 38 jffs2_sb_info structs are named `c' in the source code.
34 Nee jffs_control 39 Nee jffs_control
@@ -126,6 +131,7 @@ struct jffs2_sb_info {
126#endif 131#endif
127 132
128 struct jffs2_summary *summary; /* Summary information */ 133 struct jffs2_summary *summary; /* Summary information */
134 struct jffs2_mount_opts mount_opts;
129 135
130#ifdef CONFIG_JFFS2_FS_XATTR 136#ifdef CONFIG_JFFS2_FS_XATTR
131#define XATTRINDEX_HASHSIZE (57) 137#define XATTRINDEX_HASHSIZE (57)
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 6c1755c59c0f..ab65ee3ec858 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -176,7 +176,7 @@ void jffs2_dirty_inode(struct inode *inode, int flags);
176struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, 176struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode,
177 struct jffs2_raw_inode *ri); 177 struct jffs2_raw_inode *ri);
178int jffs2_statfs (struct dentry *, struct kstatfs *); 178int jffs2_statfs (struct dentry *, struct kstatfs *);
179int jffs2_remount_fs (struct super_block *, int *, char *); 179int jffs2_do_remount_fs(struct super_block *, int *, char *);
180int jffs2_do_fill_super(struct super_block *sb, void *data, int silent); 180int jffs2_do_fill_super(struct super_block *sb, void *data, int silent);
181void jffs2_gc_release_inode(struct jffs2_sb_info *c, 181void jffs2_gc_release_inode(struct jffs2_sb_info *c,
182 struct jffs2_inode_info *f); 182 struct jffs2_inode_info *f);
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 8d8cd3419d02..28107ca136e4 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -275,9 +275,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
275 else 275 else
276 c->mtd->unpoint(c->mtd, 0, c->mtd->size); 276 c->mtd->unpoint(c->mtd, 0, c->mtd->size);
277#endif 277#endif
278 if (s) 278 kfree(s);
279 kfree(s);
280
281 return ret; 279 return ret;
282} 280}
283 281
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 853b8e300084..e7e974454115 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -17,11 +17,13 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/mount.h> 19#include <linux/mount.h>
20#include <linux/parser.h>
20#include <linux/jffs2.h> 21#include <linux/jffs2.h>
21#include <linux/pagemap.h> 22#include <linux/pagemap.h>
22#include <linux/mtd/super.h> 23#include <linux/mtd/super.h>
23#include <linux/ctype.h> 24#include <linux/ctype.h>
24#include <linux/namei.h> 25#include <linux/namei.h>
26#include <linux/seq_file.h>
25#include <linux/exportfs.h> 27#include <linux/exportfs.h>
26#include "compr.h" 28#include "compr.h"
27#include "nodelist.h" 29#include "nodelist.h"
@@ -75,6 +77,37 @@ static void jffs2_write_super(struct super_block *sb)
75 unlock_super(sb); 77 unlock_super(sb);
76} 78}
77 79
80static const char *jffs2_compr_name(unsigned int compr)
81{
82 switch (compr) {
83 case JFFS2_COMPR_MODE_NONE:
84 return "none";
85#ifdef CONFIG_JFFS2_LZO
86 case JFFS2_COMPR_MODE_FORCELZO:
87 return "lzo";
88#endif
89#ifdef CONFIG_JFFS2_ZLIB
90 case JFFS2_COMPR_MODE_FORCEZLIB:
91 return "zlib";
92#endif
93 default:
94 /* should never happen; programmer error */
95 WARN_ON(1);
96 return "";
97 }
98}
99
100static int jffs2_show_options(struct seq_file *s, struct vfsmount *mnt)
101{
102 struct jffs2_sb_info *c = JFFS2_SB_INFO(mnt->mnt_sb);
103 struct jffs2_mount_opts *opts = &c->mount_opts;
104
105 if (opts->override_compr)
106 seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr));
107
108 return 0;
109}
110
78static int jffs2_sync_fs(struct super_block *sb, int wait) 111static int jffs2_sync_fs(struct super_block *sb, int wait)
79{ 112{
80 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); 113 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
@@ -133,6 +166,85 @@ static const struct export_operations jffs2_export_ops = {
133 .fh_to_parent = jffs2_fh_to_parent, 166 .fh_to_parent = jffs2_fh_to_parent,
134}; 167};
135 168
169/*
170 * JFFS2 mount options.
171 *
172 * Opt_override_compr: override default compressor
173 * Opt_err: just end of array marker
174 */
175enum {
176 Opt_override_compr,
177 Opt_err,
178};
179
180static const match_table_t tokens = {
181 {Opt_override_compr, "compr=%s"},
182 {Opt_err, NULL},
183};
184
185static int jffs2_parse_options(struct jffs2_sb_info *c, char *data)
186{
187 substring_t args[MAX_OPT_ARGS];
188 char *p, *name;
189
190 if (!data)
191 return 0;
192
193 while ((p = strsep(&data, ","))) {
194 int token;
195
196 if (!*p)
197 continue;
198
199 token = match_token(p, tokens, args);
200 switch (token) {
201 case Opt_override_compr:
202 name = match_strdup(&args[0]);
203
204 if (!name)
205 return -ENOMEM;
206 if (!strcmp(name, "none"))
207 c->mount_opts.compr = JFFS2_COMPR_MODE_NONE;
208#ifdef CONFIG_JFFS2_LZO
209 else if (!strcmp(name, "lzo"))
210 c->mount_opts.compr = JFFS2_COMPR_MODE_FORCELZO;
211#endif
212#ifdef CONFIG_JFFS2_ZLIB
213 else if (!strcmp(name, "zlib"))
214 c->mount_opts.compr =
215 JFFS2_COMPR_MODE_FORCEZLIB;
216#endif
217 else {
218 printk(KERN_ERR "JFFS2 Error: unknown compressor \"%s\"",
219 name);
220 kfree(name);
221 return -EINVAL;
222 }
223 kfree(name);
224 c->mount_opts.override_compr = true;
225 break;
226 default:
227 printk(KERN_ERR "JFFS2 Error: unrecognized mount option '%s' or missing value\n",
228 p);
229 return -EINVAL;
230 }
231 }
232
233 return 0;
234}
235
236static int jffs2_remount_fs(struct super_block *sb, int *flags, char *data)
237{
238 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
239 int err;
240
241 err = jffs2_parse_options(c, data);
242 if (err)
243 return -EINVAL;
244
245 return jffs2_do_remount_fs(sb, flags, data);
246}
247
136static const struct super_operations jffs2_super_operations = 248static const struct super_operations jffs2_super_operations =
137{ 249{
138 .alloc_inode = jffs2_alloc_inode, 250 .alloc_inode = jffs2_alloc_inode,
@@ -143,6 +255,7 @@ static const struct super_operations jffs2_super_operations =
143 .remount_fs = jffs2_remount_fs, 255 .remount_fs = jffs2_remount_fs,
144 .evict_inode = jffs2_evict_inode, 256 .evict_inode = jffs2_evict_inode,
145 .dirty_inode = jffs2_dirty_inode, 257 .dirty_inode = jffs2_dirty_inode,
258 .show_options = jffs2_show_options,
146 .sync_fs = jffs2_sync_fs, 259 .sync_fs = jffs2_sync_fs,
147}; 260};
148 261
@@ -166,6 +279,12 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
166 c->os_priv = sb; 279 c->os_priv = sb;
167 sb->s_fs_info = c; 280 sb->s_fs_info = c;
168 281
282 ret = jffs2_parse_options(c, data);
283 if (ret) {
284 kfree(c);
285 return -EINVAL;
286 }
287
169 /* Initialize JFFS2 superblock locks, the further initialization will 288 /* Initialize JFFS2 superblock locks, the further initialization will
170 * be done later */ 289 * be done later */
171 mutex_init(&c->alloc_sem); 290 mutex_init(&c->alloc_sem);
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 4515bea0268f..b09e51d2f81f 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -578,8 +578,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
578 if (!jffs2_is_writebuffered(c)) 578 if (!jffs2_is_writebuffered(c))
579 return 0; 579 return 0;
580 580
581 if (mutex_trylock(&c->alloc_sem)) { 581 if (!mutex_is_locked(&c->alloc_sem)) {
582 mutex_unlock(&c->alloc_sem);
583 printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n"); 582 printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
584 BUG(); 583 BUG();
585 } 584 }
@@ -1026,7 +1025,7 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
1026 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); 1025 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1027 struct mtd_oob_ops ops; 1026 struct mtd_oob_ops ops;
1028 1027
1029 ops.mode = MTD_OOB_AUTO; 1028 ops.mode = MTD_OPS_AUTO_OOB;
1030 ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail; 1029 ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
1031 ops.oobbuf = c->oobbuf; 1030 ops.oobbuf = c->oobbuf;
1032 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; 1031 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
@@ -1069,7 +1068,7 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
1069 struct mtd_oob_ops ops; 1068 struct mtd_oob_ops ops;
1070 int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); 1069 int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1071 1070
1072 ops.mode = MTD_OOB_AUTO; 1071 ops.mode = MTD_OPS_AUTO_OOB;
1073 ops.ooblen = cmlen; 1072 ops.ooblen = cmlen;
1074 ops.oobbuf = c->oobbuf; 1073 ops.oobbuf = c->oobbuf;
1075 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; 1074 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
@@ -1095,7 +1094,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
1095 struct mtd_oob_ops ops; 1094 struct mtd_oob_ops ops;
1096 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); 1095 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1097 1096
1098 ops.mode = MTD_OOB_AUTO; 1097 ops.mode = MTD_OPS_AUTO_OOB;
1099 ops.ooblen = cmlen; 1098 ops.ooblen = cmlen;
1100 ops.oobbuf = (uint8_t *)&oob_cleanmarker; 1099 ops.oobbuf = (uint8_t *)&oob_cleanmarker;
1101 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; 1100 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
diff --git a/fs/namei.c b/fs/namei.c
index ac6d214da827..5008f01787f5 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -852,7 +852,7 @@ static int follow_managed(struct path *path, unsigned flags)
852 mntput(path->mnt); 852 mntput(path->mnt);
853 if (ret == -EISDIR) 853 if (ret == -EISDIR)
854 ret = 0; 854 ret = 0;
855 return ret; 855 return ret < 0 ? ret : need_mntput;
856} 856}
857 857
858int follow_down_one(struct path *path) 858int follow_down_one(struct path *path)
@@ -900,6 +900,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
900 break; 900 break;
901 path->mnt = mounted; 901 path->mnt = mounted;
902 path->dentry = mounted->mnt_root; 902 path->dentry = mounted->mnt_root;
903 nd->flags |= LOOKUP_JUMPED;
903 nd->seq = read_seqcount_begin(&path->dentry->d_seq); 904 nd->seq = read_seqcount_begin(&path->dentry->d_seq);
904 /* 905 /*
905 * Update the inode too. We don't need to re-check the 906 * Update the inode too. We don't need to re-check the
@@ -1213,6 +1214,8 @@ retry:
1213 path_put_conditional(path, nd); 1214 path_put_conditional(path, nd);
1214 return err; 1215 return err;
1215 } 1216 }
1217 if (err)
1218 nd->flags |= LOOKUP_JUMPED;
1216 *inode = path->dentry->d_inode; 1219 *inode = path->dentry->d_inode;
1217 return 0; 1220 return 0;
1218} 1221}
@@ -2146,6 +2149,10 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
2146 } 2149 }
2147 2150
2148 /* create side of things */ 2151 /* create side of things */
2152 /*
2153 * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED has been
2154 * cleared when we got to the last component we are about to look up
2155 */
2149 error = complete_walk(nd); 2156 error = complete_walk(nd);
2150 if (error) 2157 if (error)
2151 return ERR_PTR(error); 2158 return ERR_PTR(error);
@@ -2214,6 +2221,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
2214 if (error < 0) 2221 if (error < 0)
2215 goto exit_dput; 2222 goto exit_dput;
2216 2223
2224 if (error)
2225 nd->flags |= LOOKUP_JUMPED;
2226
2217 error = -ENOENT; 2227 error = -ENOENT;
2218 if (!path->dentry->d_inode) 2228 if (!path->dentry->d_inode)
2219 goto exit_dput; 2229 goto exit_dput;
@@ -2223,6 +2233,10 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
2223 2233
2224 path_to_nameidata(path, nd); 2234 path_to_nameidata(path, nd);
2225 nd->inode = path->dentry->d_inode; 2235 nd->inode = path->dentry->d_inode;
2236 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
2237 error = complete_walk(nd);
2238 if (error)
2239 goto exit;
2226 error = -EISDIR; 2240 error = -EISDIR;
2227 if (S_ISDIR(nd->inode->i_mode)) 2241 if (S_ISDIR(nd->inode->i_mode))
2228 goto exit; 2242 goto exit;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 2db1bd3173b2..851ba3dcdc29 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1652,46 +1652,12 @@ out:
1652 return error; 1652 return error;
1653} 1653}
1654 1654
1655static int proc_pid_fd_link_getattr(struct vfsmount *mnt, struct dentry *dentry,
1656 struct kstat *stat)
1657{
1658 struct inode *inode = dentry->d_inode;
1659 struct task_struct *task = get_proc_task(inode);
1660 int rc;
1661
1662 if (task == NULL)
1663 return -ESRCH;
1664
1665 rc = -EACCES;
1666 if (lock_trace(task))
1667 goto out_task;
1668
1669 generic_fillattr(inode, stat);
1670 unlock_trace(task);
1671 rc = 0;
1672out_task:
1673 put_task_struct(task);
1674 return rc;
1675}
1676
1677static const struct inode_operations proc_pid_link_inode_operations = { 1655static const struct inode_operations proc_pid_link_inode_operations = {
1678 .readlink = proc_pid_readlink, 1656 .readlink = proc_pid_readlink,
1679 .follow_link = proc_pid_follow_link, 1657 .follow_link = proc_pid_follow_link,
1680 .setattr = proc_setattr, 1658 .setattr = proc_setattr,
1681}; 1659};
1682 1660
1683static const struct inode_operations proc_fdinfo_link_inode_operations = {
1684 .setattr = proc_setattr,
1685 .getattr = proc_pid_fd_link_getattr,
1686};
1687
1688static const struct inode_operations proc_fd_link_inode_operations = {
1689 .readlink = proc_pid_readlink,
1690 .follow_link = proc_pid_follow_link,
1691 .setattr = proc_setattr,
1692 .getattr = proc_pid_fd_link_getattr,
1693};
1694
1695 1661
1696/* building an inode */ 1662/* building an inode */
1697 1663
@@ -1923,61 +1889,49 @@ out:
1923 1889
1924static int proc_fd_info(struct inode *inode, struct path *path, char *info) 1890static int proc_fd_info(struct inode *inode, struct path *path, char *info)
1925{ 1891{
1926 struct task_struct *task; 1892 struct task_struct *task = get_proc_task(inode);
1927 struct files_struct *files; 1893 struct files_struct *files = NULL;
1928 struct file *file; 1894 struct file *file;
1929 int fd = proc_fd(inode); 1895 int fd = proc_fd(inode);
1930 int rc;
1931
1932 task = get_proc_task(inode);
1933 if (!task)
1934 return -ENOENT;
1935
1936 rc = -EACCES;
1937 if (lock_trace(task))
1938 goto out_task;
1939
1940 rc = -ENOENT;
1941 files = get_files_struct(task);
1942 if (files == NULL)
1943 goto out_unlock;
1944 1896
1945 /* 1897 if (task) {
1946 * We are not taking a ref to the file structure, so we must 1898 files = get_files_struct(task);
1947 * hold ->file_lock. 1899 put_task_struct(task);
1948 */ 1900 }
1949 spin_lock(&files->file_lock); 1901 if (files) {
1950 file = fcheck_files(files, fd); 1902 /*
1951 if (file) { 1903 * We are not taking a ref to the file structure, so we must
1952 unsigned int f_flags; 1904 * hold ->file_lock.
1953 struct fdtable *fdt; 1905 */
1954 1906 spin_lock(&files->file_lock);
1955 fdt = files_fdtable(files); 1907 file = fcheck_files(files, fd);
1956 f_flags = file->f_flags & ~O_CLOEXEC; 1908 if (file) {
1957 if (FD_ISSET(fd, fdt->close_on_exec)) 1909 unsigned int f_flags;
1958 f_flags |= O_CLOEXEC; 1910 struct fdtable *fdt;
1959 1911
1960 if (path) { 1912 fdt = files_fdtable(files);
1961 *path = file->f_path; 1913 f_flags = file->f_flags & ~O_CLOEXEC;
1962 path_get(&file->f_path); 1914 if (FD_ISSET(fd, fdt->close_on_exec))
1915 f_flags |= O_CLOEXEC;
1916
1917 if (path) {
1918 *path = file->f_path;
1919 path_get(&file->f_path);
1920 }
1921 if (info)
1922 snprintf(info, PROC_FDINFO_MAX,
1923 "pos:\t%lli\n"
1924 "flags:\t0%o\n",
1925 (long long) file->f_pos,
1926 f_flags);
1927 spin_unlock(&files->file_lock);
1928 put_files_struct(files);
1929 return 0;
1963 } 1930 }
1964 if (info) 1931 spin_unlock(&files->file_lock);
1965 snprintf(info, PROC_FDINFO_MAX, 1932 put_files_struct(files);
1966 "pos:\t%lli\n" 1933 }
1967 "flags:\t0%o\n", 1934 return -ENOENT;
1968 (long long) file->f_pos,
1969 f_flags);
1970 rc = 0;
1971 } else
1972 rc = -ENOENT;
1973 spin_unlock(&files->file_lock);
1974 put_files_struct(files);
1975
1976out_unlock:
1977 unlock_trace(task);
1978out_task:
1979 put_task_struct(task);
1980 return rc;
1981} 1935}
1982 1936
1983static int proc_fd_link(struct inode *inode, struct path *path) 1937static int proc_fd_link(struct inode *inode, struct path *path)
@@ -2072,7 +2026,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
2072 spin_unlock(&files->file_lock); 2026 spin_unlock(&files->file_lock);
2073 put_files_struct(files); 2027 put_files_struct(files);
2074 2028
2075 inode->i_op = &proc_fd_link_inode_operations; 2029 inode->i_op = &proc_pid_link_inode_operations;
2076 inode->i_size = 64; 2030 inode->i_size = 64;
2077 ei->op.proc_get_link = proc_fd_link; 2031 ei->op.proc_get_link = proc_fd_link;
2078 d_set_d_op(dentry, &tid_fd_dentry_operations); 2032 d_set_d_op(dentry, &tid_fd_dentry_operations);
@@ -2104,12 +2058,7 @@ static struct dentry *proc_lookupfd_common(struct inode *dir,
2104 if (fd == ~0U) 2058 if (fd == ~0U)
2105 goto out; 2059 goto out;
2106 2060
2107 result = ERR_PTR(-EACCES);
2108 if (lock_trace(task))
2109 goto out;
2110
2111 result = instantiate(dir, dentry, task, &fd); 2061 result = instantiate(dir, dentry, task, &fd);
2112 unlock_trace(task);
2113out: 2062out:
2114 put_task_struct(task); 2063 put_task_struct(task);
2115out_no_task: 2064out_no_task:
@@ -2129,28 +2078,23 @@ static int proc_readfd_common(struct file * filp, void * dirent,
2129 retval = -ENOENT; 2078 retval = -ENOENT;
2130 if (!p) 2079 if (!p)
2131 goto out_no_task; 2080 goto out_no_task;
2132
2133 retval = -EACCES;
2134 if (lock_trace(p))
2135 goto out;
2136
2137 retval = 0; 2081 retval = 0;
2138 2082
2139 fd = filp->f_pos; 2083 fd = filp->f_pos;
2140 switch (fd) { 2084 switch (fd) {
2141 case 0: 2085 case 0:
2142 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) 2086 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
2143 goto out_unlock; 2087 goto out;
2144 filp->f_pos++; 2088 filp->f_pos++;
2145 case 1: 2089 case 1:
2146 ino = parent_ino(dentry); 2090 ino = parent_ino(dentry);
2147 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) 2091 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
2148 goto out_unlock; 2092 goto out;
2149 filp->f_pos++; 2093 filp->f_pos++;
2150 default: 2094 default:
2151 files = get_files_struct(p); 2095 files = get_files_struct(p);
2152 if (!files) 2096 if (!files)
2153 goto out_unlock; 2097 goto out;
2154 rcu_read_lock(); 2098 rcu_read_lock();
2155 for (fd = filp->f_pos-2; 2099 for (fd = filp->f_pos-2;
2156 fd < files_fdtable(files)->max_fds; 2100 fd < files_fdtable(files)->max_fds;
@@ -2174,9 +2118,6 @@ static int proc_readfd_common(struct file * filp, void * dirent,
2174 rcu_read_unlock(); 2118 rcu_read_unlock();
2175 put_files_struct(files); 2119 put_files_struct(files);
2176 } 2120 }
2177
2178out_unlock:
2179 unlock_trace(p);
2180out: 2121out:
2181 put_task_struct(p); 2122 put_task_struct(p);
2182out_no_task: 2123out_no_task:
@@ -2254,7 +2195,6 @@ static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
2254 ei->fd = fd; 2195 ei->fd = fd;
2255 inode->i_mode = S_IFREG | S_IRUSR; 2196 inode->i_mode = S_IFREG | S_IRUSR;
2256 inode->i_fop = &proc_fdinfo_file_operations; 2197 inode->i_fop = &proc_fdinfo_file_operations;
2257 inode->i_op = &proc_fdinfo_link_inode_operations;
2258 d_set_d_op(dentry, &tid_fd_dentry_operations); 2198 d_set_d_op(dentry, &tid_fd_dentry_operations);
2259 d_add(dentry, inode); 2199 d_add(dentry, inode);
2260 /* Close the race of the process dying before we return the dentry */ 2200 /* Close the race of the process dying before we return the dentry */
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index eef109a1a927..b09ba2dd8b62 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -870,6 +870,22 @@ void dbg_dump_lpt_info(struct ubifs_info *c)
870 spin_unlock(&dbg_lock); 870 spin_unlock(&dbg_lock);
871} 871}
872 872
873void dbg_dump_sleb(const struct ubifs_info *c,
874 const struct ubifs_scan_leb *sleb, int offs)
875{
876 struct ubifs_scan_node *snod;
877
878 printk(KERN_DEBUG "(pid %d) start dumping scanned data from LEB %d:%d\n",
879 current->pid, sleb->lnum, offs);
880
881 list_for_each_entry(snod, &sleb->nodes, list) {
882 cond_resched();
883 printk(KERN_DEBUG "Dumping node at LEB %d:%d len %d\n", sleb->lnum,
884 snod->offs, snod->len);
885 dbg_dump_node(c, snod->node);
886 }
887}
888
873void dbg_dump_leb(const struct ubifs_info *c, int lnum) 889void dbg_dump_leb(const struct ubifs_info *c, int lnum)
874{ 890{
875 struct ubifs_scan_leb *sleb; 891 struct ubifs_scan_leb *sleb;
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index feb361e252ac..8d9c46810189 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -269,6 +269,8 @@ void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp);
269void dbg_dump_lprops(struct ubifs_info *c); 269void dbg_dump_lprops(struct ubifs_info *c);
270void dbg_dump_lpt_info(struct ubifs_info *c); 270void dbg_dump_lpt_info(struct ubifs_info *c);
271void dbg_dump_leb(const struct ubifs_info *c, int lnum); 271void dbg_dump_leb(const struct ubifs_info *c, int lnum);
272void dbg_dump_sleb(const struct ubifs_info *c,
273 const struct ubifs_scan_leb *sleb, int offs);
272void dbg_dump_znode(const struct ubifs_info *c, 274void dbg_dump_znode(const struct ubifs_info *c,
273 const struct ubifs_znode *znode); 275 const struct ubifs_znode *znode);
274void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat); 276void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat);
@@ -387,6 +389,9 @@ static inline void dbg_dump_lpt_info(struct ubifs_info *c) { return; }
387static inline void dbg_dump_leb(const struct ubifs_info *c, 389static inline void dbg_dump_leb(const struct ubifs_info *c,
388 int lnum) { return; } 390 int lnum) { return; }
389static inline void 391static inline void
392dbg_dump_sleb(const struct ubifs_info *c,
393 const struct ubifs_scan_leb *sleb, int offs) { return; }
394static inline void
390dbg_dump_znode(const struct ubifs_info *c, 395dbg_dump_znode(const struct ubifs_info *c,
391 const struct ubifs_znode *znode) { return; } 396 const struct ubifs_znode *znode) { return; }
392static inline void dbg_dump_heap(struct ubifs_info *c, 397static inline void dbg_dump_heap(struct ubifs_info *c,
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index af02790d9328..ee4f43f4bb99 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -983,7 +983,7 @@ int ubifs_recover_inl_heads(struct ubifs_info *c, void *sbuf)
983} 983}
984 984
985/** 985/**
986 * clean_an_unclean_leb - read and write a LEB to remove corruption. 986 * clean_an_unclean_leb - read and write a LEB to remove corruption.
987 * @c: UBIFS file-system description object 987 * @c: UBIFS file-system description object
988 * @ucleb: unclean LEB information 988 * @ucleb: unclean LEB information
989 * @sbuf: LEB-sized buffer to use 989 * @sbuf: LEB-sized buffer to use
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 93d938ad3d2a..6094c5a5d7a8 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -247,7 +247,7 @@ static int create_default_filesystem(struct ubifs_info *c)
247 mst->total_dirty = cpu_to_le64(tmp64); 247 mst->total_dirty = cpu_to_le64(tmp64);
248 248
249 /* The indexing LEB does not contribute to dark space */ 249 /* The indexing LEB does not contribute to dark space */
250 tmp64 = (c->main_lebs - 1) * c->dark_wm; 250 tmp64 = ((long long)(c->main_lebs - 1) * c->dark_wm);
251 mst->total_dark = cpu_to_le64(tmp64); 251 mst->total_dark = cpu_to_le64(tmp64);
252 252
253 mst->total_used = cpu_to_le64(UBIFS_INO_NODE_SZ); 253 mst->total_used = cpu_to_le64(UBIFS_INO_NODE_SZ);
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 33b13310ee0c..574d4ee9b625 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -189,7 +189,7 @@ xfs_end_io(
189 int error = 0; 189 int error = 0;
190 190
191 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { 191 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
192 error = -EIO; 192 ioend->io_error = -EIO;
193 goto done; 193 goto done;
194 } 194 }
195 if (ioend->io_error) 195 if (ioend->io_error)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 1a3513881bce..eac97ef81e2a 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -656,7 +656,7 @@ xfs_buf_item_committing(
656/* 656/*
657 * This is the ops vector shared by all buf log items. 657 * This is the ops vector shared by all buf log items.
658 */ 658 */
659static struct xfs_item_ops xfs_buf_item_ops = { 659static const struct xfs_item_ops xfs_buf_item_ops = {
660 .iop_size = xfs_buf_item_size, 660 .iop_size = xfs_buf_item_size,
661 .iop_format = xfs_buf_item_format, 661 .iop_format = xfs_buf_item_format,
662 .iop_pin = xfs_buf_item_pin, 662 .iop_pin = xfs_buf_item_pin,
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index bb3f71d236d2..0dee0b71029d 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -295,7 +295,7 @@ xfs_qm_dquot_logitem_committing(
295/* 295/*
296 * This is the ops vector for dquots 296 * This is the ops vector for dquots
297 */ 297 */
298static struct xfs_item_ops xfs_dquot_item_ops = { 298static const struct xfs_item_ops xfs_dquot_item_ops = {
299 .iop_size = xfs_qm_dquot_logitem_size, 299 .iop_size = xfs_qm_dquot_logitem_size,
300 .iop_format = xfs_qm_dquot_logitem_format, 300 .iop_format = xfs_qm_dquot_logitem_format,
301 .iop_pin = xfs_qm_dquot_logitem_pin, 301 .iop_pin = xfs_qm_dquot_logitem_pin,
@@ -483,7 +483,7 @@ xfs_qm_qoff_logitem_committing(
483{ 483{
484} 484}
485 485
486static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = { 486static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
487 .iop_size = xfs_qm_qoff_logitem_size, 487 .iop_size = xfs_qm_qoff_logitem_size,
488 .iop_format = xfs_qm_qoff_logitem_format, 488 .iop_format = xfs_qm_qoff_logitem_format,
489 .iop_pin = xfs_qm_qoff_logitem_pin, 489 .iop_pin = xfs_qm_qoff_logitem_pin,
@@ -498,7 +498,7 @@ static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
498/* 498/*
499 * This is the ops vector shared by all quotaoff-start log items. 499 * This is the ops vector shared by all quotaoff-start log items.
500 */ 500 */
501static struct xfs_item_ops xfs_qm_qoff_logitem_ops = { 501static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
502 .iop_size = xfs_qm_qoff_logitem_size, 502 .iop_size = xfs_qm_qoff_logitem_size,
503 .iop_format = xfs_qm_qoff_logitem_format, 503 .iop_format = xfs_qm_qoff_logitem_format,
504 .iop_pin = xfs_qm_qoff_logitem_pin, 504 .iop_pin = xfs_qm_qoff_logitem_pin,
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index d22e62623437..35c2aff38b20 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -217,7 +217,7 @@ xfs_efi_item_committing(
217/* 217/*
218 * This is the ops vector shared by all efi log items. 218 * This is the ops vector shared by all efi log items.
219 */ 219 */
220static struct xfs_item_ops xfs_efi_item_ops = { 220static const struct xfs_item_ops xfs_efi_item_ops = {
221 .iop_size = xfs_efi_item_size, 221 .iop_size = xfs_efi_item_size,
222 .iop_format = xfs_efi_item_format, 222 .iop_format = xfs_efi_item_format,
223 .iop_pin = xfs_efi_item_pin, 223 .iop_pin = xfs_efi_item_pin,
@@ -477,7 +477,7 @@ xfs_efd_item_committing(
477/* 477/*
478 * This is the ops vector shared by all efd log items. 478 * This is the ops vector shared by all efd log items.
479 */ 479 */
480static struct xfs_item_ops xfs_efd_item_ops = { 480static const struct xfs_item_ops xfs_efd_item_ops = {
481 .iop_size = xfs_efd_item_size, 481 .iop_size = xfs_efd_item_size,
482 .iop_format = xfs_efd_item_format, 482 .iop_format = xfs_efd_item_format,
483 .iop_pin = xfs_efd_item_pin, 483 .iop_pin = xfs_efd_item_pin,
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index b7cf21ba240f..abaafdbb3e65 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -795,7 +795,7 @@ xfs_inode_item_committing(
795/* 795/*
796 * This is the ops vector shared by all buf log items. 796 * This is the ops vector shared by all buf log items.
797 */ 797 */
798static struct xfs_item_ops xfs_inode_item_ops = { 798static const struct xfs_item_ops xfs_inode_item_ops = {
799 .iop_size = xfs_inode_item_size, 799 .iop_size = xfs_inode_item_size,
800 .iop_format = xfs_inode_item_format, 800 .iop_format = xfs_inode_item_format,
801 .iop_pin = xfs_inode_item_pin, 801 .iop_pin = xfs_inode_item_pin,
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 2758a6277c52..a14cd89fe465 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -626,7 +626,7 @@ xfs_log_item_init(
626 struct xfs_mount *mp, 626 struct xfs_mount *mp,
627 struct xfs_log_item *item, 627 struct xfs_log_item *item,
628 int type, 628 int type,
629 struct xfs_item_ops *ops) 629 const struct xfs_item_ops *ops)
630{ 630{
631 item->li_mountp = mp; 631 item->li_mountp = mp;
632 item->li_ailp = mp->m_ail; 632 item->li_ailp = mp->m_ail;
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 78c9039994af..3f7bf451c034 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -137,7 +137,7 @@ struct xfs_trans;
137void xfs_log_item_init(struct xfs_mount *mp, 137void xfs_log_item_init(struct xfs_mount *mp,
138 struct xfs_log_item *item, 138 struct xfs_log_item *item,
139 int type, 139 int type,
140 struct xfs_item_ops *ops); 140 const struct xfs_item_ops *ops);
141 141
142xfs_lsn_t xfs_log_done(struct xfs_mount *mp, 142xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
143 struct xlog_ticket *ticket, 143 struct xlog_ticket *ticket,
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 603f3eb52041..3ae713c0abd9 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -326,7 +326,7 @@ typedef struct xfs_log_item {
326 struct xfs_log_item *); 326 struct xfs_log_item *);
327 /* buffer item iodone */ 327 /* buffer item iodone */
328 /* callback func */ 328 /* callback func */
329 struct xfs_item_ops *li_ops; /* function list */ 329 const struct xfs_item_ops *li_ops; /* function list */
330 330
331 /* delayed logging */ 331 /* delayed logging */
332 struct list_head li_cil; /* CIL pointers */ 332 struct list_head li_cil; /* CIL pointers */
@@ -341,7 +341,7 @@ typedef struct xfs_log_item {
341 { XFS_LI_IN_AIL, "IN_AIL" }, \ 341 { XFS_LI_IN_AIL, "IN_AIL" }, \
342 { XFS_LI_ABORTED, "ABORTED" } 342 { XFS_LI_ABORTED, "ABORTED" }
343 343
344typedef struct xfs_item_ops { 344struct xfs_item_ops {
345 uint (*iop_size)(xfs_log_item_t *); 345 uint (*iop_size)(xfs_log_item_t *);
346 void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *); 346 void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
347 void (*iop_pin)(xfs_log_item_t *); 347 void (*iop_pin)(xfs_log_item_t *);
@@ -352,7 +352,7 @@ typedef struct xfs_item_ops {
352 void (*iop_push)(xfs_log_item_t *); 352 void (*iop_push)(xfs_log_item_t *);
353 bool (*iop_pushbuf)(xfs_log_item_t *); 353 bool (*iop_pushbuf)(xfs_log_item_t *);
354 void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); 354 void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
355} xfs_item_ops_t; 355};
356 356
357#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip) 357#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip)
358#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp) 358#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp)
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 4ecf2a549060..ce9268a2f56b 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -112,7 +112,7 @@ xfs_readlink(
112 char *link) 112 char *link)
113{ 113{
114 xfs_mount_t *mp = ip->i_mount; 114 xfs_mount_t *mp = ip->i_mount;
115 int pathlen; 115 xfs_fsize_t pathlen;
116 int error = 0; 116 int error = 0;
117 117
118 trace_xfs_readlink(ip); 118 trace_xfs_readlink(ip);
@@ -122,13 +122,19 @@ xfs_readlink(
122 122
123 xfs_ilock(ip, XFS_ILOCK_SHARED); 123 xfs_ilock(ip, XFS_ILOCK_SHARED);
124 124
125 ASSERT(S_ISLNK(ip->i_d.di_mode));
126 ASSERT(ip->i_d.di_size <= MAXPATHLEN);
127
128 pathlen = ip->i_d.di_size; 125 pathlen = ip->i_d.di_size;
129 if (!pathlen) 126 if (!pathlen)
130 goto out; 127 goto out;
131 128
129 if (pathlen < 0 || pathlen > MAXPATHLEN) {
130 xfs_alert(mp, "%s: inode (%llu) bad symlink length (%lld)",
131 __func__, (unsigned long long) ip->i_ino,
132 (long long) pathlen);
133 ASSERT(0);
134 return XFS_ERROR(EFSCORRUPTED);
135 }
136
137
132 if (ip->i_df.if_flags & XFS_IFINLINE) { 138 if (ip->i_df.if_flags & XFS_IFINLINE) {
133 memcpy(link, ip->i_df.if_u1.if_data, pathlen); 139 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
134 link[pathlen] = '\0'; 140 link[pathlen] = '\0';
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index e49c36d38d7e..bb145e4b935e 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
144{ 144{
145} 145}
146static inline int register_hotplug_dock_device(acpi_handle handle, 146static inline int register_hotplug_dock_device(acpi_handle handle,
147 struct acpi_dock_ops *ops, 147 const struct acpi_dock_ops *ops,
148 void *context) 148 void *context)
149{ 149{
150 return -ENODEV; 150 return -ENODEV;
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index b67231bef632..ed73f6705c86 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -470,7 +470,6 @@ typedef u64 acpi_integer;
470 */ 470 */
471#define ACPI_FULL_INITIALIZATION 0x00 471#define ACPI_FULL_INITIALIZATION 0x00
472#define ACPI_NO_ADDRESS_SPACE_INIT 0x01 472#define ACPI_NO_ADDRESS_SPACE_INIT 0x01
473#define ACPI_NO_HARDWARE_INIT 0x02
474#define ACPI_NO_EVENT_INIT 0x04 473#define ACPI_NO_EVENT_INIT 0x04
475#define ACPI_NO_HANDLER_INIT 0x08 474#define ACPI_NO_HANDLER_INIT 0x08
476#define ACPI_NO_ACPI_ENABLE 0x10 475#define ACPI_NO_ACPI_ENABLE 0x10
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 67055f180330..610f6fb1bbc2 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -329,6 +329,7 @@ extern void acpi_processor_throttling_init(void);
329int acpi_processor_power_init(struct acpi_processor *pr, 329int acpi_processor_power_init(struct acpi_processor *pr,
330 struct acpi_device *device); 330 struct acpi_device *device);
331int acpi_processor_cst_has_changed(struct acpi_processor *pr); 331int acpi_processor_cst_has_changed(struct acpi_processor *pr);
332int acpi_processor_hotplug(struct acpi_processor *pr);
332int acpi_processor_power_exit(struct acpi_processor *pr, 333int acpi_processor_power_exit(struct acpi_processor *pr,
333 struct acpi_device *device); 334 struct acpi_device *device);
334int acpi_processor_suspend(struct acpi_device * device, pm_message_t state); 335int acpi_processor_suspend(struct acpi_device * device, pm_message_t state);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index cf399495d38f..1f9e9516e2b7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -990,7 +990,9 @@ struct drm_minor {
990 struct proc_dir_entry *proc_root; /**< proc directory entry */ 990 struct proc_dir_entry *proc_root; /**< proc directory entry */
991 struct drm_info_node proc_nodes; 991 struct drm_info_node proc_nodes;
992 struct dentry *debugfs_root; 992 struct dentry *debugfs_root;
993 struct drm_info_node debugfs_nodes; 993
994 struct list_head debugfs_list;
995 struct mutex debugfs_lock; /* Protects debugfs_list. */
994 996
995 struct drm_master *master; /* currently active master for this node */ 997 struct drm_master *master; /* currently active master for this node */
996 struct list_head master_list; 998 struct list_head master_list;
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 0d2f727e96be..93df2d72750b 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -72,6 +72,7 @@
72 72
73#define DP_MAIN_LINK_CHANNEL_CODING 0x006 73#define DP_MAIN_LINK_CHANNEL_CODING 0x006
74 74
75#define DP_EDP_CONFIGURATION_CAP 0x00d
75#define DP_TRAINING_AUX_RD_INTERVAL 0x00e 76#define DP_TRAINING_AUX_RD_INTERVAL 0x00e
76 77
77#define DP_PSR_SUPPORT 0x070 78#define DP_PSR_SUPPORT 0x070
@@ -159,6 +160,8 @@
159# define DP_CP_IRQ (1 << 2) 160# define DP_CP_IRQ (1 << 2)
160# define DP_SINK_SPECIFIC_IRQ (1 << 6) 161# define DP_SINK_SPECIFIC_IRQ (1 << 6)
161 162
163#define DP_EDP_CONFIGURATION_SET 0x10a
164
162#define DP_LANE0_1_STATUS 0x202 165#define DP_LANE0_1_STATUS 0x202
163#define DP_LANE2_3_STATUS 0x203 166#define DP_LANE2_3_STATUS 0x203
164# define DP_LANE_CR_DONE (1 << 0) 167# define DP_LANE_CR_DONE (1 << 0)
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index c4961ea50a49..d30bedfeb7ef 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -120,11 +120,12 @@ struct drm_mode_crtc {
120 struct drm_mode_modeinfo mode; 120 struct drm_mode_modeinfo mode;
121}; 121};
122 122
123#define DRM_MODE_ENCODER_NONE 0 123#define DRM_MODE_ENCODER_NONE 0
124#define DRM_MODE_ENCODER_DAC 1 124#define DRM_MODE_ENCODER_DAC 1
125#define DRM_MODE_ENCODER_TMDS 2 125#define DRM_MODE_ENCODER_TMDS 2
126#define DRM_MODE_ENCODER_LVDS 3 126#define DRM_MODE_ENCODER_LVDS 3
127#define DRM_MODE_ENCODER_TVDAC 4 127#define DRM_MODE_ENCODER_TVDAC 4
128#define DRM_MODE_ENCODER_VIRTUAL 5
128 129
129struct drm_mode_get_encoder { 130struct drm_mode_get_encoder {
130 __u32 encoder_id; 131 __u32 encoder_id;
@@ -162,6 +163,7 @@ struct drm_mode_get_encoder {
162#define DRM_MODE_CONNECTOR_HDMIB 12 163#define DRM_MODE_CONNECTOR_HDMIB 12
163#define DRM_MODE_CONNECTOR_TV 13 164#define DRM_MODE_CONNECTOR_TV 13
164#define DRM_MODE_CONNECTOR_eDP 14 165#define DRM_MODE_CONNECTOR_eDP 14
166#define DRM_MODE_CONNECTOR_VIRTUAL 15
165 167
166struct drm_mode_get_connector { 168struct drm_mode_get_connector {
167 169
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 874c4d271328..1d161cb3aca5 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -36,11 +36,13 @@
36 * - this size value would be page-aligned internally. 36 * - this size value would be page-aligned internally.
37 * @flags: user request for setting memory type or cache attributes. 37 * @flags: user request for setting memory type or cache attributes.
38 * @handle: returned handle for the object. 38 * @handle: returned handle for the object.
39 * @pad: just padding to be 64-bit aligned.
39 */ 40 */
40struct drm_exynos_gem_create { 41struct drm_exynos_gem_create {
41 unsigned int size; 42 unsigned int size;
42 unsigned int flags; 43 unsigned int flags;
43 unsigned int handle; 44 unsigned int handle;
45 unsigned int pad;
44}; 46};
45 47
46/** 48/**
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index cd7cd8162ed6..bcb0912afe7a 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -54,7 +54,7 @@
54#define DRM_VMW_FENCE_EVENT 17 54#define DRM_VMW_FENCE_EVENT 17
55#define DRM_VMW_PRESENT 18 55#define DRM_VMW_PRESENT 18
56#define DRM_VMW_PRESENT_READBACK 19 56#define DRM_VMW_PRESENT_READBACK 19
57 57#define DRM_VMW_UPDATE_LAYOUT 20
58 58
59/*************************************************************************/ 59/*************************************************************************/
60/** 60/**
@@ -552,31 +552,6 @@ struct drm_vmw_get_3d_cap_arg {
552 552
553/*************************************************************************/ 553/*************************************************************************/
554/** 554/**
555 * DRM_VMW_UPDATE_LAYOUT - Update layout
556 *
557 * Updates the preferred modes and connection status for connectors. The
558 * command conisits of one drm_vmw_update_layout_arg pointing out a array
559 * of num_outputs drm_vmw_rect's.
560 */
561
562/**
563 * struct drm_vmw_update_layout_arg
564 *
565 * @num_outputs: number of active
566 * @rects: pointer to array of drm_vmw_rect
567 *
568 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
569 */
570
571struct drm_vmw_update_layout_arg {
572 uint32_t num_outputs;
573 uint32_t pad64;
574 uint64_t rects;
575};
576
577
578/*************************************************************************/
579/**
580 * DRM_VMW_FENCE_WAIT 555 * DRM_VMW_FENCE_WAIT
581 * 556 *
582 * Waits for a fence object to signal. The wait is interruptible, so that 557 * Waits for a fence object to signal. The wait is interruptible, so that
@@ -788,4 +763,28 @@ struct drm_vmw_present_readback_arg {
788 uint64_t clips_ptr; 763 uint64_t clips_ptr;
789 uint64_t fence_rep; 764 uint64_t fence_rep;
790}; 765};
766
767/*************************************************************************/
768/**
769 * DRM_VMW_UPDATE_LAYOUT - Update layout
770 *
771 * Updates the preferred modes and connection status for connectors. The
772 * command consists of one drm_vmw_update_layout_arg pointing to an array
773 * of num_outputs drm_vmw_rect's.
774 */
775
776/**
777 * struct drm_vmw_update_layout_arg
778 *
779 * @num_outputs: number of active connectors
780 * @rects: pointer to array of drm_vmw_rect cast to an uint64_t
781 *
782 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
783 */
784struct drm_vmw_update_layout_arg {
785 uint32_t num_outputs;
786 uint32_t pad64;
787 uint64_t rects;
788};
789
791#endif 790#endif
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 583baf22cad2..7408af843b8a 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -23,57 +23,62 @@
23struct module; 23struct module;
24 24
25struct cpuidle_device; 25struct cpuidle_device;
26struct cpuidle_driver;
26 27
27 28
28/**************************** 29/****************************
29 * CPUIDLE DEVICE INTERFACE * 30 * CPUIDLE DEVICE INTERFACE *
30 ****************************/ 31 ****************************/
31 32
33struct cpuidle_state_usage {
34 void *driver_data;
35
36 unsigned long long usage;
37 unsigned long long time; /* in US */
38};
39
32struct cpuidle_state { 40struct cpuidle_state {
33 char name[CPUIDLE_NAME_LEN]; 41 char name[CPUIDLE_NAME_LEN];
34 char desc[CPUIDLE_DESC_LEN]; 42 char desc[CPUIDLE_DESC_LEN];
35 void *driver_data;
36 43
37 unsigned int flags; 44 unsigned int flags;
38 unsigned int exit_latency; /* in US */ 45 unsigned int exit_latency; /* in US */
39 unsigned int power_usage; /* in mW */ 46 unsigned int power_usage; /* in mW */
40 unsigned int target_residency; /* in US */ 47 unsigned int target_residency; /* in US */
41 48
42 unsigned long long usage;
43 unsigned long long time; /* in US */
44
45 int (*enter) (struct cpuidle_device *dev, 49 int (*enter) (struct cpuidle_device *dev,
46 struct cpuidle_state *state); 50 struct cpuidle_driver *drv,
51 int index);
47}; 52};
48 53
49/* Idle State Flags */ 54/* Idle State Flags */
50#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */ 55#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
51#define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */
52 56
53#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) 57#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
54 58
55/** 59/**
56 * cpuidle_get_statedata - retrieves private driver state data 60 * cpuidle_get_statedata - retrieves private driver state data
57 * @state: the state 61 * @st_usage: the state usage statistics
58 */ 62 */
59static inline void * cpuidle_get_statedata(struct cpuidle_state *state) 63static inline void *cpuidle_get_statedata(struct cpuidle_state_usage *st_usage)
60{ 64{
61 return state->driver_data; 65 return st_usage->driver_data;
62} 66}
63 67
64/** 68/**
65 * cpuidle_set_statedata - stores private driver state data 69 * cpuidle_set_statedata - stores private driver state data
66 * @state: the state 70 * @st_usage: the state usage statistics
67 * @data: the private data 71 * @data: the private data
68 */ 72 */
69static inline void 73static inline void
70cpuidle_set_statedata(struct cpuidle_state *state, void *data) 74cpuidle_set_statedata(struct cpuidle_state_usage *st_usage, void *data)
71{ 75{
72 state->driver_data = data; 76 st_usage->driver_data = data;
73} 77}
74 78
75struct cpuidle_state_kobj { 79struct cpuidle_state_kobj {
76 struct cpuidle_state *state; 80 struct cpuidle_state *state;
81 struct cpuidle_state_usage *state_usage;
77 struct completion kobj_unregister; 82 struct completion kobj_unregister;
78 struct kobject kobj; 83 struct kobject kobj;
79}; 84};
@@ -81,22 +86,17 @@ struct cpuidle_state_kobj {
81struct cpuidle_device { 86struct cpuidle_device {
82 unsigned int registered:1; 87 unsigned int registered:1;
83 unsigned int enabled:1; 88 unsigned int enabled:1;
84 unsigned int power_specified:1;
85 unsigned int cpu; 89 unsigned int cpu;
86 90
87 int last_residency; 91 int last_residency;
88 int state_count; 92 int state_count;
89 struct cpuidle_state states[CPUIDLE_STATE_MAX]; 93 struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
90 struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; 94 struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
91 struct cpuidle_state *last_state;
92 95
93 struct list_head device_list; 96 struct list_head device_list;
94 struct kobject kobj; 97 struct kobject kobj;
95 struct completion kobj_unregister; 98 struct completion kobj_unregister;
96 void *governor_data; 99 void *governor_data;
97 struct cpuidle_state *safe_state;
98
99 int (*prepare) (struct cpuidle_device *dev);
100}; 100};
101 101
102DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); 102DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -120,6 +120,11 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
120struct cpuidle_driver { 120struct cpuidle_driver {
121 char name[CPUIDLE_NAME_LEN]; 121 char name[CPUIDLE_NAME_LEN];
122 struct module *owner; 122 struct module *owner;
123
124 unsigned int power_specified:1;
125 struct cpuidle_state states[CPUIDLE_STATE_MAX];
126 int state_count;
127 int safe_state_index;
123}; 128};
124 129
125#ifdef CONFIG_CPU_IDLE 130#ifdef CONFIG_CPU_IDLE
@@ -166,11 +171,14 @@ struct cpuidle_governor {
166 struct list_head governor_list; 171 struct list_head governor_list;
167 unsigned int rating; 172 unsigned int rating;
168 173
169 int (*enable) (struct cpuidle_device *dev); 174 int (*enable) (struct cpuidle_driver *drv,
170 void (*disable) (struct cpuidle_device *dev); 175 struct cpuidle_device *dev);
176 void (*disable) (struct cpuidle_driver *drv,
177 struct cpuidle_device *dev);
171 178
172 int (*select) (struct cpuidle_device *dev); 179 int (*select) (struct cpuidle_driver *drv,
173 void (*reflect) (struct cpuidle_device *dev); 180 struct cpuidle_device *dev);
181 void (*reflect) (struct cpuidle_device *dev, int index);
174 182
175 struct module *owner; 183 struct module *owner;
176}; 184};
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index afb94583960c..98ce8124b1cc 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -41,7 +41,7 @@ struct devfreq_dev_status {
41 unsigned long total_time; 41 unsigned long total_time;
42 unsigned long busy_time; 42 unsigned long busy_time;
43 unsigned long current_frequency; 43 unsigned long current_frequency;
44 void *private_date; 44 void *private_data;
45}; 45};
46 46
47/** 47/**
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 45f00b61c096..de33de1e2052 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -1097,10 +1097,12 @@ struct ethtool_ops {
1097#define SPEED_1000 1000 1097#define SPEED_1000 1000
1098#define SPEED_2500 2500 1098#define SPEED_2500 2500
1099#define SPEED_10000 10000 1099#define SPEED_10000 10000
1100#define SPEED_UNKNOWN -1
1100 1101
1101/* Duplex, half or full. */ 1102/* Duplex, half or full. */
1102#define DUPLEX_HALF 0x00 1103#define DUPLEX_HALF 0x00
1103#define DUPLEX_FULL 0x01 1104#define DUPLEX_FULL 0x01
1105#define DUPLEX_UNKNOWN 0xff
1104 1106
1105/* Which connector port. */ 1107/* Which connector port. */
1106#define PORT_TP 0x00 1108#define PORT_TP 0x00
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index 08a2fee40659..aad6bd4b3efd 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -118,7 +118,6 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
118static inline 118static inline
119void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) 119void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
120{ 120{
121 return 0;
122} 121}
123 122
124static inline int hwspin_lock_get_id(struct hwspinlock *hwlock) 123static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index fae295048a8b..83a9caec0e43 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -1963,6 +1963,21 @@
1963#define WM8958_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */ 1963#define WM8958_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */
1964 1964
1965/* 1965/*
1966 * R210 (0xD2) - Mic Detect 3
1967 */
1968#define WM8958_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */
1969#define WM8958_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */
1970#define WM8958_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */
1971#define WM8958_MICD_VALID 0x0002 /* MICD_VALID */
1972#define WM8958_MICD_VALID_MASK 0x0002 /* MICD_VALID */
1973#define WM8958_MICD_VALID_SHIFT 1 /* MICD_VALID */
1974#define WM8958_MICD_VALID_WIDTH 1 /* MICD_VALID */
1975#define WM8958_MICD_STS 0x0001 /* MICD_STS */
1976#define WM8958_MICD_STS_MASK 0x0001 /* MICD_STS */
1977#define WM8958_MICD_STS_SHIFT 0 /* MICD_STS */
1978#define WM8958_MICD_STS_WIDTH 1 /* MICD_STS */
1979
1980/*
1966 * R76 (0x4C) - Charge Pump (1) 1981 * R76 (0x4C) - Charge Pump (1)
1967 */ 1982 */
1968#define WM8994_CP_ENA 0x8000 /* CP_ENA */ 1983#define WM8994_CP_ENA 0x8000 /* CP_ENA */
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 57cc0e63714f..c4eec228eef9 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -86,24 +86,39 @@ struct nand_bbt_descr {
86#define NAND_BBT_VERSION 0x00000100 86#define NAND_BBT_VERSION 0x00000100
87/* Create a bbt if none exists */ 87/* Create a bbt if none exists */
88#define NAND_BBT_CREATE 0x00000200 88#define NAND_BBT_CREATE 0x00000200
89/*
90 * Create an empty BBT with no vendor information. Vendor's information may be
91 * unavailable, for example, if the NAND controller has a different data and OOB
92 * layout or if this information is already purged. Must be used in conjunction
93 * with NAND_BBT_CREATE.
94 */
95#define NAND_BBT_CREATE_EMPTY 0x00000400
89/* Search good / bad pattern through all pages of a block */ 96/* Search good / bad pattern through all pages of a block */
90#define NAND_BBT_SCANALLPAGES 0x00000400 97#define NAND_BBT_SCANALLPAGES 0x00000800
91/* Scan block empty during good / bad block scan */ 98/* Scan block empty during good / bad block scan */
92#define NAND_BBT_SCANEMPTY 0x00000800 99#define NAND_BBT_SCANEMPTY 0x00001000
93/* Write bbt if neccecary */ 100/* Write bbt if neccecary */
94#define NAND_BBT_WRITE 0x00001000 101#define NAND_BBT_WRITE 0x00002000
95/* Read and write back block contents when writing bbt */ 102/* Read and write back block contents when writing bbt */
96#define NAND_BBT_SAVECONTENT 0x00002000 103#define NAND_BBT_SAVECONTENT 0x00004000
97/* Search good / bad pattern on the first and the second page */ 104/* Search good / bad pattern on the first and the second page */
98#define NAND_BBT_SCAN2NDPAGE 0x00004000 105#define NAND_BBT_SCAN2NDPAGE 0x00008000
99/* Search good / bad pattern on the last page of the eraseblock */ 106/* Search good / bad pattern on the last page of the eraseblock */
100#define NAND_BBT_SCANLASTPAGE 0x00008000 107#define NAND_BBT_SCANLASTPAGE 0x00010000
101/* Chip stores bad block marker on BOTH 1st and 6th bytes of OOB */ 108/*
102#define NAND_BBT_SCANBYTE1AND6 0x00100000 109 * Use a flash based bad block table. By default, OOB identifier is saved in
103/* The nand_bbt_descr was created dynamicaly and must be freed */ 110 * OOB area. This option is passed to the default bad block table function.
104#define NAND_BBT_DYNAMICSTRUCT 0x00200000 111 */
105/* The bad block table does not OOB for marker */ 112#define NAND_BBT_USE_FLASH 0x00020000
106#define NAND_BBT_NO_OOB 0x00400000 113/* Do not store flash based bad block table in OOB area; store it in-band */
114#define NAND_BBT_NO_OOB 0x00040000
115
116/*
117 * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
118 * was allocated dynamicaly and must be freed in nand_release(). Has no meaning
119 * in nand_chip.bbt_options.
120 */
121#define NAND_BBT_DYNAMICSTRUCT 0x80000000
107 122
108/* The maximum number of blocks to scan for a bbt */ 123/* The maximum number of blocks to scan for a bbt */
109#define NAND_BBT_SCAN_MAXBLOCKS 4 124#define NAND_BBT_SCAN_MAXBLOCKS 4
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 37be05bbfbc8..9f5b312af783 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -32,17 +32,19 @@
32#define MTD_CHAR_MAJOR 90 32#define MTD_CHAR_MAJOR 90
33#define MTD_BLOCK_MAJOR 31 33#define MTD_BLOCK_MAJOR 31
34 34
35#define MTD_ERASE_PENDING 0x01 35#define MTD_ERASE_PENDING 0x01
36#define MTD_ERASING 0x02 36#define MTD_ERASING 0x02
37#define MTD_ERASE_SUSPEND 0x04 37#define MTD_ERASE_SUSPEND 0x04
38#define MTD_ERASE_DONE 0x08 38#define MTD_ERASE_DONE 0x08
39#define MTD_ERASE_FAILED 0x10 39#define MTD_ERASE_FAILED 0x10
40 40
41#define MTD_FAIL_ADDR_UNKNOWN -1LL 41#define MTD_FAIL_ADDR_UNKNOWN -1LL
42 42
43/* If the erase fails, fail_addr might indicate exactly which block failed. If 43/*
44 fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not 44 * If the erase fails, fail_addr might indicate exactly which block failed. If
45 specific to any particular block. */ 45 * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
46 * or was not specific to any particular block.
47 */
46struct erase_info { 48struct erase_info {
47 struct mtd_info *mtd; 49 struct mtd_info *mtd;
48 uint64_t addr; 50 uint64_t addr;
@@ -59,26 +61,12 @@ struct erase_info {
59}; 61};
60 62
61struct mtd_erase_region_info { 63struct mtd_erase_region_info {
62 uint64_t offset; /* At which this region starts, from the beginning of the MTD */ 64 uint64_t offset; /* At which this region starts, from the beginning of the MTD */
63 uint32_t erasesize; /* For this region */ 65 uint32_t erasesize; /* For this region */
64 uint32_t numblocks; /* Number of blocks of erasesize in this region */ 66 uint32_t numblocks; /* Number of blocks of erasesize in this region */
65 unsigned long *lockmap; /* If keeping bitmap of locks */ 67 unsigned long *lockmap; /* If keeping bitmap of locks */
66}; 68};
67 69
68/*
69 * oob operation modes
70 *
71 * MTD_OOB_PLACE: oob data are placed at the given offset
72 * MTD_OOB_AUTO: oob data are automatically placed at the free areas
73 * which are defined by the ecclayout
74 * MTD_OOB_RAW: mode to read oob and data without doing ECC checking
75 */
76typedef enum {
77 MTD_OOB_PLACE,
78 MTD_OOB_AUTO,
79 MTD_OOB_RAW,
80} mtd_oob_mode_t;
81
82/** 70/**
83 * struct mtd_oob_ops - oob operation operands 71 * struct mtd_oob_ops - oob operation operands
84 * @mode: operation mode 72 * @mode: operation mode
@@ -90,7 +78,7 @@ typedef enum {
90 * @ooblen: number of oob bytes to write/read 78 * @ooblen: number of oob bytes to write/read
91 * @oobretlen: number of oob bytes written/read 79 * @oobretlen: number of oob bytes written/read
92 * @ooboffs: offset of oob data in the oob area (only relevant when 80 * @ooboffs: offset of oob data in the oob area (only relevant when
93 * mode = MTD_OOB_PLACE) 81 * mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
94 * @datbuf: data buffer - if NULL only oob data are read/written 82 * @datbuf: data buffer - if NULL only oob data are read/written
95 * @oobbuf: oob data buffer 83 * @oobbuf: oob data buffer
96 * 84 *
@@ -99,7 +87,7 @@ typedef enum {
99 * OOB area. 87 * OOB area.
100 */ 88 */
101struct mtd_oob_ops { 89struct mtd_oob_ops {
102 mtd_oob_mode_t mode; 90 unsigned int mode;
103 size_t len; 91 size_t len;
104 size_t retlen; 92 size_t retlen;
105 size_t ooblen; 93 size_t ooblen;
@@ -173,7 +161,7 @@ struct mtd_info {
173 const char *name; 161 const char *name;
174 int index; 162 int index;
175 163
176 /* ecc layout structure pointer - read only ! */ 164 /* ECC layout structure pointer - read only! */
177 struct nand_ecclayout *ecclayout; 165 struct nand_ecclayout *ecclayout;
178 166
179 /* Data for variable erase regions. If numeraseregions is zero, 167 /* Data for variable erase regions. If numeraseregions is zero,
@@ -324,10 +312,15 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
324 /* Kernel-side ioctl definitions */ 312 /* Kernel-side ioctl definitions */
325 313
326struct mtd_partition; 314struct mtd_partition;
327 315struct mtd_part_parser_data;
328extern int mtd_device_register(struct mtd_info *master, 316
329 const struct mtd_partition *parts, 317extern int mtd_device_parse_register(struct mtd_info *mtd,
330 int nr_parts); 318 const char **part_probe_types,
319 struct mtd_part_parser_data *parser_data,
320 const struct mtd_partition *defparts,
321 int defnr_parts);
322#define mtd_device_register(master, parts, nr_parts) \
323 mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
331extern int mtd_device_unregister(struct mtd_info *master); 324extern int mtd_device_unregister(struct mtd_info *master);
332extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num); 325extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
333extern int __get_mtd_device(struct mtd_info *mtd); 326extern int __get_mtd_device(struct mtd_info *mtd);
@@ -356,27 +349,16 @@ void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
356 349
357void mtd_erase_callback(struct erase_info *instr); 350void mtd_erase_callback(struct erase_info *instr);
358 351
359/* 352static inline int mtd_is_bitflip(int err) {
360 * Debugging macro and defines 353 return err == -EUCLEAN;
361 */ 354}
362#define MTD_DEBUG_LEVEL0 (0) /* Quiet */ 355
363#define MTD_DEBUG_LEVEL1 (1) /* Audible */ 356static inline int mtd_is_eccerr(int err) {
364#define MTD_DEBUG_LEVEL2 (2) /* Loud */ 357 return err == -EBADMSG;
365#define MTD_DEBUG_LEVEL3 (3) /* Noisy */ 358}
366 359
367#ifdef CONFIG_MTD_DEBUG 360static inline int mtd_is_bitflip_or_eccerr(int err) {
368#define DEBUG(n, args...) \ 361 return mtd_is_bitflip(err) || mtd_is_eccerr(err);
369 do { \ 362}
370 if (n <= CONFIG_MTD_DEBUG_VERBOSE) \
371 printk(KERN_INFO args); \
372 } while(0)
373#else /* CONFIG_MTD_DEBUG */
374#define DEBUG(n, args...) \
375 do { \
376 if (0) \
377 printk(KERN_INFO args); \
378 } while(0)
379
380#endif /* CONFIG_MTD_DEBUG */
381 363
382#endif /* __MTD_MTD_H__ */ 364#endif /* __MTD_MTD_H__ */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index c2b9ac4fbc4a..904131bab501 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -42,10 +42,10 @@ extern void nand_release(struct mtd_info *mtd);
42/* Internal helper for board drivers which need to override command function */ 42/* Internal helper for board drivers which need to override command function */
43extern void nand_wait_ready(struct mtd_info *mtd); 43extern void nand_wait_ready(struct mtd_info *mtd);
44 44
45/* locks all blockes present in the device */ 45/* locks all blocks present in the device */
46extern int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 46extern int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
47 47
48/* unlocks specified locked blockes */ 48/* unlocks specified locked blocks */
49extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 49extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
50 50
51/* The maximum number of NAND chips in an array */ 51/* The maximum number of NAND chips in an array */
@@ -150,7 +150,7 @@ typedef enum {
150#define NAND_ECC_READ 0 150#define NAND_ECC_READ 0
151/* Reset Hardware ECC for write */ 151/* Reset Hardware ECC for write */
152#define NAND_ECC_WRITE 1 152#define NAND_ECC_WRITE 1
153/* Enable Hardware ECC before syndrom is read back from flash */ 153/* Enable Hardware ECC before syndrome is read back from flash */
154#define NAND_ECC_READSYN 2 154#define NAND_ECC_READSYN 2
155 155
156/* Bit mask for flags passed to do_nand_read_ecc */ 156/* Bit mask for flags passed to do_nand_read_ecc */
@@ -163,7 +163,7 @@ typedef enum {
163 */ 163 */
164/* Chip can not auto increment pages */ 164/* Chip can not auto increment pages */
165#define NAND_NO_AUTOINCR 0x00000001 165#define NAND_NO_AUTOINCR 0x00000001
166/* Buswitdh is 16 bit */ 166/* Buswidth is 16 bit */
167#define NAND_BUSWIDTH_16 0x00000002 167#define NAND_BUSWIDTH_16 0x00000002
168/* Device supports partial programming without padding */ 168/* Device supports partial programming without padding */
169#define NAND_NO_PADDING 0x00000004 169#define NAND_NO_PADDING 0x00000004
@@ -219,27 +219,15 @@ typedef enum {
219#define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR) 219#define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR)
220 220
221/* Non chip related options */ 221/* Non chip related options */
222/*
223 * Use a flash based bad block table. OOB identifier is saved in OOB area.
224 * This option is passed to the default bad block table function.
225 */
226#define NAND_USE_FLASH_BBT 0x00010000
227/* This option skips the bbt scan during initialization. */ 222/* This option skips the bbt scan during initialization. */
228#define NAND_SKIP_BBTSCAN 0x00020000 223#define NAND_SKIP_BBTSCAN 0x00010000
229/* 224/*
230 * This option is defined if the board driver allocates its own buffers 225 * This option is defined if the board driver allocates its own buffers
231 * (e.g. because it needs them DMA-coherent). 226 * (e.g. because it needs them DMA-coherent).
232 */ 227 */
233#define NAND_OWN_BUFFERS 0x00040000 228#define NAND_OWN_BUFFERS 0x00020000
234/* Chip may not exist, so silence any errors in scan */ 229/* Chip may not exist, so silence any errors in scan */
235#define NAND_SCAN_SILENT_NODEV 0x00080000 230#define NAND_SCAN_SILENT_NODEV 0x00040000
236/*
237 * If passed additionally to NAND_USE_FLASH_BBT then BBT code will not touch
238 * the OOB area.
239 */
240#define NAND_USE_FLASH_BBT_NO_OOB 0x00800000
241/* Create an empty BBT with no vendor information if the BBT is available */
242#define NAND_CREATE_EMPTY_BBT 0x01000000
243 231
244/* Options set by nand scan */ 232/* Options set by nand scan */
245/* Nand scan has allocated controller struct */ 233/* Nand scan has allocated controller struct */
@@ -331,27 +319,29 @@ struct nand_hw_control {
331}; 319};
332 320
333/** 321/**
334 * struct nand_ecc_ctrl - Control structure for ecc 322 * struct nand_ecc_ctrl - Control structure for ECC
335 * @mode: ecc mode 323 * @mode: ECC mode
336 * @steps: number of ecc steps per page 324 * @steps: number of ECC steps per page
337 * @size: data bytes per ecc step 325 * @size: data bytes per ECC step
338 * @bytes: ecc bytes per step 326 * @bytes: ECC bytes per step
339 * @total: total number of ecc bytes per page 327 * @total: total number of ECC bytes per page
340 * @prepad: padding information for syndrome based ecc generators 328 * @prepad: padding information for syndrome based ECC generators
341 * @postpad: padding information for syndrome based ecc generators 329 * @postpad: padding information for syndrome based ECC generators
342 * @layout: ECC layout control struct pointer 330 * @layout: ECC layout control struct pointer
343 * @priv: pointer to private ecc control data 331 * @priv: pointer to private ECC control data
344 * @hwctl: function to control hardware ecc generator. Must only 332 * @hwctl: function to control hardware ECC generator. Must only
345 * be provided if an hardware ECC is available 333 * be provided if an hardware ECC is available
346 * @calculate: function for ecc calculation or readback from ecc hardware 334 * @calculate: function for ECC calculation or readback from ECC hardware
347 * @correct: function for ecc correction, matching to ecc generator (sw/hw) 335 * @correct: function for ECC correction, matching to ECC generator (sw/hw)
348 * @read_page_raw: function to read a raw page without ECC 336 * @read_page_raw: function to read a raw page without ECC
349 * @write_page_raw: function to write a raw page without ECC 337 * @write_page_raw: function to write a raw page without ECC
350 * @read_page: function to read a page according to the ecc generator 338 * @read_page: function to read a page according to the ECC generator
351 * requirements. 339 * requirements.
352 * @read_subpage: function to read parts of the page covered by ECC. 340 * @read_subpage: function to read parts of the page covered by ECC.
353 * @write_page: function to write a page according to the ecc generator 341 * @write_page: function to write a page according to the ECC generator
354 * requirements. 342 * requirements.
343 * @write_oob_raw: function to write chip OOB data without ECC
344 * @read_oob_raw: function to read chip OOB data without ECC
355 * @read_oob: function to read chip OOB data 345 * @read_oob: function to read chip OOB data
356 * @write_oob: function to write chip OOB data 346 * @write_oob: function to write chip OOB data
357 */ 347 */
@@ -380,6 +370,10 @@ struct nand_ecc_ctrl {
380 uint32_t offs, uint32_t len, uint8_t *buf); 370 uint32_t offs, uint32_t len, uint8_t *buf);
381 void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, 371 void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
382 const uint8_t *buf); 372 const uint8_t *buf);
373 int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
374 int page);
375 int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
376 int page, int sndcmd);
383 int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page, 377 int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page,
384 int sndcmd); 378 int sndcmd);
385 int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip, 379 int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -388,8 +382,8 @@ struct nand_ecc_ctrl {
388 382
389/** 383/**
390 * struct nand_buffers - buffer structure for read/write 384 * struct nand_buffers - buffer structure for read/write
391 * @ecccalc: buffer for calculated ecc 385 * @ecccalc: buffer for calculated ECC
392 * @ecccode: buffer for ecc read from flash 386 * @ecccode: buffer for ECC read from flash
393 * @databuf: buffer for data - dynamically sized 387 * @databuf: buffer for data - dynamically sized
394 * 388 *
395 * Do not change the order of buffers. databuf and oobrbuf must be in 389 * Do not change the order of buffers. databuf and oobrbuf must be in
@@ -422,7 +416,7 @@ struct nand_buffers {
422 * mtd->oobsize, mtd->writesize and so on. 416 * mtd->oobsize, mtd->writesize and so on.
423 * @id_data contains the 8 bytes values of NAND_CMD_READID. 417 * @id_data contains the 8 bytes values of NAND_CMD_READID.
424 * Return with the bus width. 418 * Return with the bus width.
425 * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accesing 419 * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing
426 * device ready/busy line. If set to NULL no access to 420 * device ready/busy line. If set to NULL no access to
427 * ready/busy is available and the ready/busy information 421 * ready/busy is available and the ready/busy information
428 * is read from the chip status register. 422 * is read from the chip status register.
@@ -430,17 +424,17 @@ struct nand_buffers {
430 * commands to the chip. 424 * commands to the chip.
431 * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on 425 * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on
432 * ready. 426 * ready.
433 * @ecc: [BOARDSPECIFIC] ecc control ctructure 427 * @ecc: [BOARDSPECIFIC] ECC control structure
434 * @buffers: buffer structure for read/write 428 * @buffers: buffer structure for read/write
435 * @hwcontrol: platform-specific hardware control structure 429 * @hwcontrol: platform-specific hardware control structure
436 * @ops: oob operation operands
437 * @erase_cmd: [INTERN] erase command write function, selectable due 430 * @erase_cmd: [INTERN] erase command write function, selectable due
438 * to AND support. 431 * to AND support.
439 * @scan_bbt: [REPLACEABLE] function to scan bad block table 432 * @scan_bbt: [REPLACEABLE] function to scan bad block table
440 * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring 433 * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring
441 * data from array to read regs (tR). 434 * data from array to read regs (tR).
442 * @state: [INTERN] the current state of the NAND device 435 * @state: [INTERN] the current state of the NAND device
443 * @oob_poi: poison value buffer 436 * @oob_poi: "poison value buffer," used for laying out OOB data
437 * before writing
444 * @page_shift: [INTERN] number of address bits in a page (column 438 * @page_shift: [INTERN] number of address bits in a page (column
445 * address bits). 439 * address bits).
446 * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock 440 * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock
@@ -449,6 +443,9 @@ struct nand_buffers {
449 * @options: [BOARDSPECIFIC] various chip options. They can partly 443 * @options: [BOARDSPECIFIC] various chip options. They can partly
450 * be set to inform nand_scan about special functionality. 444 * be set to inform nand_scan about special functionality.
451 * See the defines for further explanation. 445 * See the defines for further explanation.
446 * @bbt_options: [INTERN] bad block specific options. All options used
447 * here must come from bbm.h. By default, these options
448 * will be copied to the appropriate nand_bbt_descr's.
452 * @badblockpos: [INTERN] position of the bad block marker in the oob 449 * @badblockpos: [INTERN] position of the bad block marker in the oob
453 * area. 450 * area.
454 * @badblockbits: [INTERN] number of bits to left-shift the bad block 451 * @badblockbits: [INTERN] number of bits to left-shift the bad block
@@ -464,7 +461,7 @@ struct nand_buffers {
464 * non 0 if ONFI supported. 461 * non 0 if ONFI supported.
465 * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is 462 * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is
466 * supported, 0 otherwise. 463 * supported, 0 otherwise.
467 * @ecclayout: [REPLACEABLE] the default ecc placement scheme 464 * @ecclayout: [REPLACEABLE] the default ECC placement scheme
468 * @bbt: [INTERN] bad block table pointer 465 * @bbt: [INTERN] bad block table pointer
469 * @bbt_td: [REPLACEABLE] bad block table descriptor for flash 466 * @bbt_td: [REPLACEABLE] bad block table descriptor for flash
470 * lookup. 467 * lookup.
@@ -472,9 +469,9 @@ struct nand_buffers {
472 * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial 469 * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial
473 * bad block scan. 470 * bad block scan.
474 * @controller: [REPLACEABLE] a pointer to a hardware controller 471 * @controller: [REPLACEABLE] a pointer to a hardware controller
475 * structure which is shared among multiple independend 472 * structure which is shared among multiple independent
476 * devices. 473 * devices.
477 * @priv: [OPTIONAL] pointer to private chip date 474 * @priv: [OPTIONAL] pointer to private chip data
478 * @errstat: [OPTIONAL] hardware specific function to perform 475 * @errstat: [OPTIONAL] hardware specific function to perform
479 * additional error status checks (determine if errors are 476 * additional error status checks (determine if errors are
480 * correctable). 477 * correctable).
@@ -509,6 +506,7 @@ struct nand_chip {
509 506
510 int chip_delay; 507 int chip_delay;
511 unsigned int options; 508 unsigned int options;
509 unsigned int bbt_options;
512 510
513 int page_shift; 511 int page_shift;
514 int phys_erase_shift; 512 int phys_erase_shift;
@@ -536,8 +534,6 @@ struct nand_chip {
536 struct nand_buffers *buffers; 534 struct nand_buffers *buffers;
537 struct nand_hw_control hwcontrol; 535 struct nand_hw_control hwcontrol;
538 536
539 struct mtd_oob_ops ops;
540
541 uint8_t *bbt; 537 uint8_t *bbt;
542 struct nand_bbt_descr *bbt_td; 538 struct nand_bbt_descr *bbt_td;
543 struct nand_bbt_descr *bbt_md; 539 struct nand_bbt_descr *bbt_md;
@@ -611,10 +607,9 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
611 * @partitions: mtd partition list 607 * @partitions: mtd partition list
612 * @chip_delay: R/B delay value in us 608 * @chip_delay: R/B delay value in us
613 * @options: Option flags, e.g. 16bit buswidth 609 * @options: Option flags, e.g. 16bit buswidth
614 * @ecclayout: ecc layout info structure 610 * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
611 * @ecclayout: ECC layout info structure
615 * @part_probe_types: NULL-terminated array of probe types 612 * @part_probe_types: NULL-terminated array of probe types
616 * @set_parts: platform specific function to set partitions
617 * @priv: hardware controller specific settings
618 */ 613 */
619struct platform_nand_chip { 614struct platform_nand_chip {
620 int nr_chips; 615 int nr_chips;
@@ -624,9 +619,8 @@ struct platform_nand_chip {
624 struct nand_ecclayout *ecclayout; 619 struct nand_ecclayout *ecclayout;
625 int chip_delay; 620 int chip_delay;
626 unsigned int options; 621 unsigned int options;
622 unsigned int bbt_options;
627 const char **part_probe_types; 623 const char **part_probe_types;
628 void (*set_parts)(uint64_t size, struct platform_nand_chip *chip);
629 void *priv;
630}; 624};
631 625
632/* Keep gcc happy */ 626/* Keep gcc happy */
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 52b6f187bf49..4596503c9da9 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -184,6 +184,9 @@ struct onenand_chip {
184#define ONENAND_IS_CACHE_PROGRAM(this) \ 184#define ONENAND_IS_CACHE_PROGRAM(this) \
185 (this->options & ONENAND_HAS_CACHE_PROGRAM) 185 (this->options & ONENAND_HAS_CACHE_PROGRAM)
186 186
187#define ONENAND_IS_NOP_1(this) \
188 (this->options & ONENAND_HAS_NOP_1)
189
187/* Check byte access in OneNAND */ 190/* Check byte access in OneNAND */
188#define ONENAND_CHECK_BYTE_ACCESS(addr) (addr & 0x1) 191#define ONENAND_CHECK_BYTE_ACCESS(addr) (addr & 0x1)
189 192
@@ -195,6 +198,7 @@ struct onenand_chip {
195#define ONENAND_HAS_2PLANE (0x0004) 198#define ONENAND_HAS_2PLANE (0x0004)
196#define ONENAND_HAS_4KB_PAGE (0x0008) 199#define ONENAND_HAS_4KB_PAGE (0x0008)
197#define ONENAND_HAS_CACHE_PROGRAM (0x0010) 200#define ONENAND_HAS_CACHE_PROGRAM (0x0010)
201#define ONENAND_HAS_NOP_1 (0x0020)
198#define ONENAND_SKIP_UNLOCK_CHECK (0x0100) 202#define ONENAND_SKIP_UNLOCK_CHECK (0x0100)
199#define ONENAND_PAGEBUF_ALLOC (0x1000) 203#define ONENAND_PAGEBUF_ALLOC (0x1000)
200#define ONENAND_OOBBUF_ALLOC (0x2000) 204#define ONENAND_OOBBUF_ALLOC (0x2000)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 3a6f0372fc96..2475228c1158 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -24,7 +24,9 @@
24 * will extend to the end of the master MTD device. 24 * will extend to the end of the master MTD device.
25 * offset: absolute starting position within the master MTD device; if 25 * offset: absolute starting position within the master MTD device; if
26 * defined as MTDPART_OFS_APPEND, the partition will start where the 26 * defined as MTDPART_OFS_APPEND, the partition will start where the
27 * previous one ended; if MTDPART_OFS_NXTBLK, at the next erase block. 27 * previous one ended; if MTDPART_OFS_NXTBLK, at the next erase block;
28 * if MTDPART_OFS_RETAIN, consume as much as possible, leaving size
29 * after the end of partition.
28 * mask_flags: contains flags that have to be masked (removed) from the 30 * mask_flags: contains flags that have to be masked (removed) from the
29 * master MTD flag set for the corresponding MTD partition. 31 * master MTD flag set for the corresponding MTD partition.
30 * For example, to force a read-only partition, simply adding 32 * For example, to force a read-only partition, simply adding
@@ -42,12 +44,25 @@ struct mtd_partition {
42 struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */ 44 struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */
43}; 45};
44 46
47#define MTDPART_OFS_RETAIN (-3)
45#define MTDPART_OFS_NXTBLK (-2) 48#define MTDPART_OFS_NXTBLK (-2)
46#define MTDPART_OFS_APPEND (-1) 49#define MTDPART_OFS_APPEND (-1)
47#define MTDPART_SIZ_FULL (0) 50#define MTDPART_SIZ_FULL (0)
48 51
49 52
50struct mtd_info; 53struct mtd_info;
54struct device_node;
55
56/**
57 * struct mtd_part_parser_data - used to pass data to MTD partition parsers.
58 * @origin: for RedBoot, start address of MTD device
59 * @of_node: for OF parsers, device node containing partitioning information
60 */
61struct mtd_part_parser_data {
62 unsigned long origin;
63 struct device_node *of_node;
64};
65
51 66
52/* 67/*
53 * Functions dealing with the various ways of partitioning the space 68 * Functions dealing with the various ways of partitioning the space
@@ -57,37 +72,12 @@ struct mtd_part_parser {
57 struct list_head list; 72 struct list_head list;
58 struct module *owner; 73 struct module *owner;
59 const char *name; 74 const char *name;
60 int (*parse_fn)(struct mtd_info *, struct mtd_partition **, unsigned long); 75 int (*parse_fn)(struct mtd_info *, struct mtd_partition **,
76 struct mtd_part_parser_data *);
61}; 77};
62 78
63extern int register_mtd_parser(struct mtd_part_parser *parser); 79extern int register_mtd_parser(struct mtd_part_parser *parser);
64extern int deregister_mtd_parser(struct mtd_part_parser *parser); 80extern int deregister_mtd_parser(struct mtd_part_parser *parser);
65extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
66 struct mtd_partition **pparts, unsigned long origin);
67
68#define put_partition_parser(p) do { module_put((p)->owner); } while(0)
69
70struct device;
71struct device_node;
72
73#ifdef CONFIG_MTD_OF_PARTS
74int __devinit of_mtd_parse_partitions(struct device *dev,
75 struct device_node *node,
76 struct mtd_partition **pparts);
77#else
78static inline int of_mtd_parse_partitions(struct device *dev,
79 struct device_node *node,
80 struct mtd_partition **pparts)
81{
82 return 0;
83}
84#endif
85
86#ifdef CONFIG_MTD_CMDLINE_PARTS
87static inline int mtd_has_cmdlinepart(void) { return 1; }
88#else
89static inline int mtd_has_cmdlinepart(void) { return 0; }
90#endif
91 81
92int mtd_is_partition(struct mtd_info *mtd); 82int mtd_is_partition(struct mtd_info *mtd);
93int mtd_add_partition(struct mtd_info *master, char *name, 83int mtd_add_partition(struct mtd_info *master, char *name,
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index e5f21d293c70..04e018160e2b 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -32,21 +32,4 @@ struct physmap_flash_data {
32 struct mtd_partition *parts; 32 struct mtd_partition *parts;
33}; 33};
34 34
35/*
36 * Board needs to specify the exact mapping during their setup time.
37 */
38void physmap_configure(unsigned long addr, unsigned long size,
39 int bankwidth, void (*set_vpp)(struct map_info *, int) );
40
41/*
42 * Machines that wish to do flash partition may want to call this function in
43 * their setup routine.
44 *
45 * physmap_set_partitions(mypartitions, num_parts);
46 *
47 * Note that one can always override this hard-coded partition with
48 * command line partition (you need to enable CONFIG_MTD_CMDLINE_PARTS).
49 */
50void physmap_set_partitions(struct mtd_partition *parts, int num_parts);
51
52#endif /* __LINUX_MTD_PHYSMAP__ */ 35#endif /* __LINUX_MTD_PHYSMAP__ */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3fdf251389de..172ba70306d1 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2405,6 +2405,8 @@
2405 2405
2406#define PCI_VENDOR_ID_AZWAVE 0x1a3b 2406#define PCI_VENDOR_ID_AZWAVE 0x1a3b
2407 2407
2408#define PCI_VENDOR_ID_ASMEDIA 0x1b21
2409
2408#define PCI_VENDOR_ID_TEKRAM 0x1de1 2410#define PCI_VENDOR_ID_TEKRAM 0x1de1
2409#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 2411#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
2410 2412
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 3605e947fa90..04c011038f32 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -121,6 +121,7 @@ extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev);
121extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev); 121extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev);
122#else 122#else
123 123
124struct pinctrl_dev;
124 125
125/* Sufficiently stupid default function when pinctrl is not in use */ 126/* Sufficiently stupid default function when pinctrl is not in use */
126static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin) 127static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin)
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
new file mode 100644
index 000000000000..e7c748fb6053
--- /dev/null
+++ b/include/linux/platform_data/macb.h
@@ -0,0 +1,17 @@
1/*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef __MACB_PDATA_H__
9#define __MACB_PDATA_H__
10
11struct macb_platform_data {
12 u32 phy_mask;
13 u8 phy_irq_pin; /* PHY IRQ */
14 u8 is_rmii; /* using RMII interface? */
15};
16
17#endif /* __MACB_PDATA_H__ */
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 0efa1f10bc2b..369273a52679 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -67,6 +67,7 @@ enum {
67 SCIx_IRDA_REGTYPE, 67 SCIx_IRDA_REGTYPE,
68 SCIx_SCIFA_REGTYPE, 68 SCIx_SCIFA_REGTYPE,
69 SCIx_SCIFB_REGTYPE, 69 SCIx_SCIFB_REGTYPE,
70 SCIx_SH2_SCIF_FIFODATA_REGTYPE,
70 SCIx_SH3_SCIF_REGTYPE, 71 SCIx_SH3_SCIF_REGTYPE,
71 SCIx_SH4_SCIF_REGTYPE, 72 SCIx_SH4_SCIF_REGTYPE,
72 SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, 73 SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h
index 3ccf18648d0a..a20831cf336a 100644
--- a/include/linux/sh_clk.h
+++ b/include/linux/sh_clk.h
@@ -52,7 +52,6 @@ struct clk {
52 52
53 unsigned long arch_flags; 53 unsigned long arch_flags;
54 void *priv; 54 void *priv;
55 struct dentry *dentry;
56 struct clk_mapping *mapping; 55 struct clk_mapping *mapping;
57 struct cpufreq_frequency_table *freq_table; 56 struct cpufreq_frequency_table *freq_table;
58 unsigned int nr_freqs; 57 unsigned int nr_freqs;
@@ -94,6 +93,9 @@ int clk_rate_table_find(struct clk *clk,
94long clk_rate_div_range_round(struct clk *clk, unsigned int div_min, 93long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
95 unsigned int div_max, unsigned long rate); 94 unsigned int div_max, unsigned long rate);
96 95
96long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
97 unsigned int mult_max, unsigned long rate);
98
97long clk_round_parent(struct clk *clk, unsigned long target, 99long clk_round_parent(struct clk *clk, unsigned long target,
98 unsigned long *best_freq, unsigned long *parent_freq, 100 unsigned long *best_freq, unsigned long *parent_freq,
99 unsigned int div_min, unsigned int div_max); 101 unsigned int div_min, unsigned int div_max);
diff --git a/include/linux/sh_pfc.h b/include/linux/sh_pfc.h
index bc8c9208f7e2..8446789216e5 100644
--- a/include/linux/sh_pfc.h
+++ b/include/linux/sh_pfc.h
@@ -104,4 +104,80 @@ struct pinmux_info {
104int register_pinmux(struct pinmux_info *pip); 104int register_pinmux(struct pinmux_info *pip);
105int unregister_pinmux(struct pinmux_info *pip); 105int unregister_pinmux(struct pinmux_info *pip);
106 106
107/* helper macro for port */
108#define PORT_1(fn, pfx, sfx) fn(pfx, sfx)
109
110#define PORT_10(fn, pfx, sfx) \
111 PORT_1(fn, pfx##0, sfx), PORT_1(fn, pfx##1, sfx), \
112 PORT_1(fn, pfx##2, sfx), PORT_1(fn, pfx##3, sfx), \
113 PORT_1(fn, pfx##4, sfx), PORT_1(fn, pfx##5, sfx), \
114 PORT_1(fn, pfx##6, sfx), PORT_1(fn, pfx##7, sfx), \
115 PORT_1(fn, pfx##8, sfx), PORT_1(fn, pfx##9, sfx)
116
117#define PORT_90(fn, pfx, sfx) \
118 PORT_10(fn, pfx##1, sfx), PORT_10(fn, pfx##2, sfx), \
119 PORT_10(fn, pfx##3, sfx), PORT_10(fn, pfx##4, sfx), \
120 PORT_10(fn, pfx##5, sfx), PORT_10(fn, pfx##6, sfx), \
121 PORT_10(fn, pfx##7, sfx), PORT_10(fn, pfx##8, sfx), \
122 PORT_10(fn, pfx##9, sfx)
123
124#define _PORT_ALL(pfx, sfx) pfx##_##sfx
125#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
126#define PORT_ALL(str) CPU_ALL_PORT(_PORT_ALL, PORT, str)
127#define GPIO_PORT_ALL() CPU_ALL_PORT(_GPIO_PORT, , unused)
128#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
129
130/* helper macro for pinmux_enum_t */
131#define PORT_DATA_I(nr) \
132 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN)
133
134#define PORT_DATA_I_PD(nr) \
135 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
136 PORT##nr##_IN, PORT##nr##_IN_PD)
137
138#define PORT_DATA_I_PU(nr) \
139 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
140 PORT##nr##_IN, PORT##nr##_IN_PU)
141
142#define PORT_DATA_I_PU_PD(nr) \
143 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
144 PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
145
146#define PORT_DATA_O(nr) \
147 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT)
148
149#define PORT_DATA_IO(nr) \
150 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
151 PORT##nr##_IN)
152
153#define PORT_DATA_IO_PD(nr) \
154 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
155 PORT##nr##_IN, PORT##nr##_IN_PD)
156
157#define PORT_DATA_IO_PU(nr) \
158 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
159 PORT##nr##_IN, PORT##nr##_IN_PU)
160
161#define PORT_DATA_IO_PU_PD(nr) \
162 PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
163 PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
164
165/* helper macro for top 4 bits in PORTnCR */
166#define _PCRH(in, in_pd, in_pu, out) \
167 0, (out), (in), 0, \
168 0, 0, 0, 0, \
169 0, 0, (in_pd), 0, \
170 0, 0, (in_pu), 0
171
172#define PORTCR(nr, reg) \
173 { \
174 PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
175 _PCRH(PORT##nr##_IN, PORT##nr##_IN_PD, \
176 PORT##nr##_IN_PU, PORT##nr##_OUT), \
177 PORT##nr##_FN0, PORT##nr##_FN1, \
178 PORT##nr##_FN2, PORT##nr##_FN3, \
179 PORT##nr##_FN4, PORT##nr##_FN5, \
180 PORT##nr##_FN6, PORT##nr##_FN7 } \
181 }
182
107#endif /* __SH_PFC_H */ 183#endif /* __SH_PFC_H */
diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h
index 2f7d45bcbd24..1a7e1d20adf9 100644
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -45,6 +45,51 @@ struct mtd_oob_buf64 {
45 __u64 usr_ptr; 45 __u64 usr_ptr;
46}; 46};
47 47
48/**
49 * MTD operation modes
50 *
51 * @MTD_OPS_PLACE_OOB: OOB data are placed at the given offset (default)
52 * @MTD_OPS_AUTO_OOB: OOB data are automatically placed at the free areas
53 * which are defined by the internal ecclayout
54 * @MTD_OPS_RAW: data are transferred as-is, with no error correction;
55 * this mode implies %MTD_OPS_PLACE_OOB
56 *
57 * These modes can be passed to ioctl(MEMWRITE) and are also used internally.
58 * See notes on "MTD file modes" for discussion on %MTD_OPS_RAW vs.
59 * %MTD_FILE_MODE_RAW.
60 */
61enum {
62 MTD_OPS_PLACE_OOB = 0,
63 MTD_OPS_AUTO_OOB = 1,
64 MTD_OPS_RAW = 2,
65};
66
67/**
68 * struct mtd_write_req - data structure for requesting a write operation
69 *
70 * @start: start address
71 * @len: length of data buffer
72 * @ooblen: length of OOB buffer
73 * @usr_data: user-provided data buffer
74 * @usr_oob: user-provided OOB buffer
75 * @mode: MTD mode (see "MTD operation modes")
76 * @padding: reserved, must be set to 0
77 *
78 * This structure supports ioctl(MEMWRITE) operations, allowing data and/or OOB
79 * writes in various modes. To write to OOB-only, set @usr_data == NULL, and to
80 * write data-only, set @usr_oob == NULL. However, setting both @usr_data and
81 * @usr_oob to NULL is not allowed.
82 */
83struct mtd_write_req {
84 __u64 start;
85 __u64 len;
86 __u64 ooblen;
87 __u64 usr_data;
88 __u64 usr_oob;
89 __u8 mode;
90 __u8 padding[7];
91};
92
48#define MTD_ABSENT 0 93#define MTD_ABSENT 0
49#define MTD_RAM 1 94#define MTD_RAM 1
50#define MTD_ROM 2 95#define MTD_ROM 2
@@ -59,13 +104,13 @@ struct mtd_oob_buf64 {
59#define MTD_NO_ERASE 0x1000 /* No erase necessary */ 104#define MTD_NO_ERASE 0x1000 /* No erase necessary */
60#define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */ 105#define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */
61 106
62// Some common devices / combinations of capabilities 107/* Some common devices / combinations of capabilities */
63#define MTD_CAP_ROM 0 108#define MTD_CAP_ROM 0
64#define MTD_CAP_RAM (MTD_WRITEABLE | MTD_BIT_WRITEABLE | MTD_NO_ERASE) 109#define MTD_CAP_RAM (MTD_WRITEABLE | MTD_BIT_WRITEABLE | MTD_NO_ERASE)
65#define MTD_CAP_NORFLASH (MTD_WRITEABLE | MTD_BIT_WRITEABLE) 110#define MTD_CAP_NORFLASH (MTD_WRITEABLE | MTD_BIT_WRITEABLE)
66#define MTD_CAP_NANDFLASH (MTD_WRITEABLE) 111#define MTD_CAP_NANDFLASH (MTD_WRITEABLE)
67 112
68/* ECC byte placement */ 113/* Obsolete ECC byte placement modes (used with obsolete MEMGETOOBSEL) */
69#define MTD_NANDECC_OFF 0 // Switch off ECC (Not recommended) 114#define MTD_NANDECC_OFF 0 // Switch off ECC (Not recommended)
70#define MTD_NANDECC_PLACE 1 // Use the given placement in the structure (YAFFS1 legacy mode) 115#define MTD_NANDECC_PLACE 1 // Use the given placement in the structure (YAFFS1 legacy mode)
71#define MTD_NANDECC_AUTOPLACE 2 // Use the default placement scheme 116#define MTD_NANDECC_AUTOPLACE 2 // Use the default placement scheme
@@ -80,21 +125,18 @@ struct mtd_oob_buf64 {
80struct mtd_info_user { 125struct mtd_info_user {
81 __u8 type; 126 __u8 type;
82 __u32 flags; 127 __u32 flags;
83 __u32 size; // Total size of the MTD 128 __u32 size; /* Total size of the MTD */
84 __u32 erasesize; 129 __u32 erasesize;
85 __u32 writesize; 130 __u32 writesize;
86 __u32 oobsize; // Amount of OOB data per block (e.g. 16) 131 __u32 oobsize; /* Amount of OOB data per block (e.g. 16) */
87 /* The below two fields are obsolete and broken, do not use them 132 __u64 padding; /* Old obsolete field; do not use */
88 * (TODO: remove at some point) */
89 __u32 ecctype;
90 __u32 eccsize;
91}; 133};
92 134
93struct region_info_user { 135struct region_info_user {
94 __u32 offset; /* At which this region starts, 136 __u32 offset; /* At which this region starts,
95 * from the beginning of the MTD */ 137 * from the beginning of the MTD */
96 __u32 erasesize; /* For this region */ 138 __u32 erasesize; /* For this region */
97 __u32 numblocks; /* Number of blocks in this region */ 139 __u32 numblocks; /* Number of blocks in this region */
98 __u32 regionindex; 140 __u32 regionindex;
99}; 141};
100 142
@@ -104,29 +146,61 @@ struct otp_info {
104 __u32 locked; 146 __u32 locked;
105}; 147};
106 148
149/*
150 * Note, the following ioctl existed in the past and was removed:
151 * #define MEMSETOOBSEL _IOW('M', 9, struct nand_oobinfo)
152 * Try to avoid adding a new ioctl with the same ioctl number.
153 */
154
155/* Get basic MTD characteristics info (better to use sysfs) */
107#define MEMGETINFO _IOR('M', 1, struct mtd_info_user) 156#define MEMGETINFO _IOR('M', 1, struct mtd_info_user)
157/* Erase segment of MTD */
108#define MEMERASE _IOW('M', 2, struct erase_info_user) 158#define MEMERASE _IOW('M', 2, struct erase_info_user)
159/* Write out-of-band data from MTD */
109#define MEMWRITEOOB _IOWR('M', 3, struct mtd_oob_buf) 160#define MEMWRITEOOB _IOWR('M', 3, struct mtd_oob_buf)
161/* Read out-of-band data from MTD */
110#define MEMREADOOB _IOWR('M', 4, struct mtd_oob_buf) 162#define MEMREADOOB _IOWR('M', 4, struct mtd_oob_buf)
163/* Lock a chip (for MTD that supports it) */
111#define MEMLOCK _IOW('M', 5, struct erase_info_user) 164#define MEMLOCK _IOW('M', 5, struct erase_info_user)
165/* Unlock a chip (for MTD that supports it) */
112#define MEMUNLOCK _IOW('M', 6, struct erase_info_user) 166#define MEMUNLOCK _IOW('M', 6, struct erase_info_user)
167/* Get the number of different erase regions */
113#define MEMGETREGIONCOUNT _IOR('M', 7, int) 168#define MEMGETREGIONCOUNT _IOR('M', 7, int)
169/* Get information about the erase region for a specific index */
114#define MEMGETREGIONINFO _IOWR('M', 8, struct region_info_user) 170#define MEMGETREGIONINFO _IOWR('M', 8, struct region_info_user)
115#define MEMSETOOBSEL _IOW('M', 9, struct nand_oobinfo) 171/* Get info about OOB modes (e.g., RAW, PLACE, AUTO) - legacy interface */
116#define MEMGETOOBSEL _IOR('M', 10, struct nand_oobinfo) 172#define MEMGETOOBSEL _IOR('M', 10, struct nand_oobinfo)
173/* Check if an eraseblock is bad */
117#define MEMGETBADBLOCK _IOW('M', 11, __kernel_loff_t) 174#define MEMGETBADBLOCK _IOW('M', 11, __kernel_loff_t)
175/* Mark an eraseblock as bad */
118#define MEMSETBADBLOCK _IOW('M', 12, __kernel_loff_t) 176#define MEMSETBADBLOCK _IOW('M', 12, __kernel_loff_t)
177/* Set OTP (One-Time Programmable) mode (factory vs. user) */
119#define OTPSELECT _IOR('M', 13, int) 178#define OTPSELECT _IOR('M', 13, int)
179/* Get number of OTP (One-Time Programmable) regions */
120#define OTPGETREGIONCOUNT _IOW('M', 14, int) 180#define OTPGETREGIONCOUNT _IOW('M', 14, int)
181/* Get all OTP (One-Time Programmable) info about MTD */
121#define OTPGETREGIONINFO _IOW('M', 15, struct otp_info) 182#define OTPGETREGIONINFO _IOW('M', 15, struct otp_info)
183/* Lock a given range of user data (must be in mode %MTD_FILE_MODE_OTP_USER) */
122#define OTPLOCK _IOR('M', 16, struct otp_info) 184#define OTPLOCK _IOR('M', 16, struct otp_info)
185/* Get ECC layout (deprecated) */
123#define ECCGETLAYOUT _IOR('M', 17, struct nand_ecclayout_user) 186#define ECCGETLAYOUT _IOR('M', 17, struct nand_ecclayout_user)
187/* Get statistics about corrected/uncorrected errors */
124#define ECCGETSTATS _IOR('M', 18, struct mtd_ecc_stats) 188#define ECCGETSTATS _IOR('M', 18, struct mtd_ecc_stats)
189/* Set MTD mode on a per-file-descriptor basis (see "MTD file modes") */
125#define MTDFILEMODE _IO('M', 19) 190#define MTDFILEMODE _IO('M', 19)
191/* Erase segment of MTD (supports 64-bit address) */
126#define MEMERASE64 _IOW('M', 20, struct erase_info_user64) 192#define MEMERASE64 _IOW('M', 20, struct erase_info_user64)
193/* Write data to OOB (64-bit version) */
127#define MEMWRITEOOB64 _IOWR('M', 21, struct mtd_oob_buf64) 194#define MEMWRITEOOB64 _IOWR('M', 21, struct mtd_oob_buf64)
195/* Read data from OOB (64-bit version) */
128#define MEMREADOOB64 _IOWR('M', 22, struct mtd_oob_buf64) 196#define MEMREADOOB64 _IOWR('M', 22, struct mtd_oob_buf64)
197/* Check if chip is locked (for MTD that supports it) */
129#define MEMISLOCKED _IOR('M', 23, struct erase_info_user) 198#define MEMISLOCKED _IOR('M', 23, struct erase_info_user)
199/*
200 * Most generic write interface; can write in-band and/or out-of-band in various
201 * modes (see "struct mtd_write_req")
202 */
203#define MEMWRITE _IOWR('M', 24, struct mtd_write_req)
130 204
131/* 205/*
132 * Obsolete legacy interface. Keep it in order not to break userspace 206 * Obsolete legacy interface. Keep it in order not to break userspace
@@ -177,13 +251,27 @@ struct mtd_ecc_stats {
177}; 251};
178 252
179/* 253/*
180 * Read/write file modes for access to MTD 254 * MTD file modes - for read/write access to MTD
255 *
256 * @MTD_FILE_MODE_NORMAL: OTP disabled, ECC enabled
257 * @MTD_FILE_MODE_OTP_FACTORY: OTP enabled in factory mode
258 * @MTD_FILE_MODE_OTP_USER: OTP enabled in user mode
259 * @MTD_FILE_MODE_RAW: OTP disabled, ECC disabled
260 *
261 * These modes can be set via ioctl(MTDFILEMODE). The mode mode will be retained
262 * separately for each open file descriptor.
263 *
264 * Note: %MTD_FILE_MODE_RAW provides the same functionality as %MTD_OPS_RAW -
265 * raw access to the flash, without error correction or autoplacement schemes.
266 * Wherever possible, the MTD_OPS_* mode will override the MTD_FILE_MODE_* mode
267 * (e.g., when using ioctl(MEMWRITE)), but in some cases, the MTD_FILE_MODE is
268 * used out of necessity (e.g., `write()', ioctl(MEMWRITEOOB64)).
181 */ 269 */
182enum mtd_file_modes { 270enum mtd_file_modes {
183 MTD_MODE_NORMAL = MTD_OTP_OFF, 271 MTD_FILE_MODE_NORMAL = MTD_OTP_OFF,
184 MTD_MODE_OTP_FACTORY = MTD_OTP_FACTORY, 272 MTD_FILE_MODE_OTP_FACTORY = MTD_OTP_FACTORY,
185 MTD_MODE_OTP_USER = MTD_OTP_USER, 273 MTD_FILE_MODE_OTP_USER = MTD_OTP_USER,
186 MTD_MODE_RAW, 274 MTD_FILE_MODE_RAW,
187}; 275};
188 276
189#endif /* __MTD_ABI_H__ */ 277#endif /* __MTD_ABI_H__ */
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index d5eee2093b1e..e2e3ecad1008 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -211,6 +211,7 @@ struct rfcomm_dlc {
211#define RFCOMM_AUTH_ACCEPT 6 211#define RFCOMM_AUTH_ACCEPT 6
212#define RFCOMM_AUTH_REJECT 7 212#define RFCOMM_AUTH_REJECT 7
213#define RFCOMM_DEFER_SETUP 8 213#define RFCOMM_DEFER_SETUP 8
214#define RFCOMM_ENC_DROP 9
214 215
215/* Scheduling flags and events */ 216/* Scheduling flags and events */
216#define RFCOMM_SCHED_WAKEUP 31 217#define RFCOMM_SCHED_WAKEUP 31
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index dc1123aa8181..72eddd1b410b 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -3567,8 +3567,9 @@ rate_lowest_index(struct ieee80211_supported_band *sband,
3567 return i; 3567 return i;
3568 3568
3569 /* warn when we cannot find a rate. */ 3569 /* warn when we cannot find a rate. */
3570 WARN_ON(1); 3570 WARN_ON_ONCE(1);
3571 3571
3572 /* and return 0 (the lowest index) */
3572 return 0; 3573 return 0;
3573} 3574}
3574 3575
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 98c185441bee..cb1f3504687f 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -192,8 +192,15 @@ enum {
192 * NLA_NUL_STRING Maximum length of string (excluding NUL) 192 * NLA_NUL_STRING Maximum length of string (excluding NUL)
193 * NLA_FLAG Unused 193 * NLA_FLAG Unused
194 * NLA_BINARY Maximum length of attribute payload 194 * NLA_BINARY Maximum length of attribute payload
195 * NLA_NESTED_COMPAT Exact length of structure payload 195 * NLA_NESTED Don't use `len' field -- length verification is
196 * All other Exact length of attribute payload 196 * done by checking len of nested header (or empty)
197 * NLA_NESTED_COMPAT Minimum length of structure payload
198 * NLA_U8, NLA_U16,
199 * NLA_U32, NLA_U64,
200 * NLA_MSECS Leaving the length field zero will verify the
201 * given type fits, using it verifies minimum length
202 * just like "All other"
203 * All other Minimum length of attribute payload
197 * 204 *
198 * Example: 205 * Example:
199 * static const struct nla_policy my_policy[ATTR_MAX+1] = { 206 * static const struct nla_policy my_policy[ATTR_MAX+1] = {
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 56db75147186..995e3bd3417b 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -70,6 +70,7 @@ static struct pm_qos_constraints cpu_dma_constraints = {
70}; 70};
71static struct pm_qos_object cpu_dma_pm_qos = { 71static struct pm_qos_object cpu_dma_pm_qos = {
72 .constraints = &cpu_dma_constraints, 72 .constraints = &cpu_dma_constraints,
73 .name = "cpu_dma_latency",
73}; 74};
74 75
75static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); 76static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
diff --git a/lib/nlattr.c b/lib/nlattr.c
index ac09f2226dc7..a8408b6cacdf 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -20,6 +20,7 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
20 [NLA_U16] = sizeof(u16), 20 [NLA_U16] = sizeof(u16),
21 [NLA_U32] = sizeof(u32), 21 [NLA_U32] = sizeof(u32),
22 [NLA_U64] = sizeof(u64), 22 [NLA_U64] = sizeof(u64),
23 [NLA_MSECS] = sizeof(u64),
23 [NLA_NESTED] = NLA_HDRLEN, 24 [NLA_NESTED] = NLA_HDRLEN,
24}; 25};
25 26
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0360d1b5a1dd..a3278f005230 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1097,13 +1097,13 @@ static void balance_dirty_pages(struct address_space *mapping,
1097 pos_ratio = bdi_position_ratio(bdi, dirty_thresh, 1097 pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
1098 background_thresh, nr_dirty, 1098 background_thresh, nr_dirty,
1099 bdi_thresh, bdi_dirty); 1099 bdi_thresh, bdi_dirty);
1100 if (unlikely(pos_ratio == 0)) { 1100 task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >>
1101 RATELIMIT_CALC_SHIFT;
1102 if (unlikely(task_ratelimit == 0)) {
1101 pause = max_pause; 1103 pause = max_pause;
1102 goto pause; 1104 goto pause;
1103 } 1105 }
1104 task_ratelimit = (u64)dirty_ratelimit * 1106 pause = HZ * pages_dirtied / task_ratelimit;
1105 pos_ratio >> RATELIMIT_CALC_SHIFT;
1106 pause = (HZ * pages_dirtied) / (task_ratelimit | 1);
1107 if (unlikely(pause <= 0)) { 1107 if (unlikely(pause <= 0)) {
1108 trace_balance_dirty_pages(bdi, 1108 trace_balance_dirty_pages(bdi,
1109 dirty_thresh, 1109 dirty_thresh,
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index b84458dcc226..be84ae33ae36 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -613,7 +613,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
613 if (!test_bit(HCI_RAW, &hdev->flags)) { 613 if (!test_bit(HCI_RAW, &hdev->flags)) {
614 set_bit(HCI_INIT, &hdev->flags); 614 set_bit(HCI_INIT, &hdev->flags);
615 __hci_request(hdev, hci_reset_req, 0, 615 __hci_request(hdev, hci_reset_req, 0,
616 msecs_to_jiffies(250)); 616 msecs_to_jiffies(HCI_INIT_TIMEOUT));
617 clear_bit(HCI_INIT, &hdev->flags); 617 clear_bit(HCI_INIT, &hdev->flags);
618 } 618 }
619 619
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 940858a48cbd..2c7634296866 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -148,8 +148,6 @@ static int read_index_list(struct sock *sk)
148 148
149 hci_del_off_timer(d); 149 hci_del_off_timer(d);
150 150
151 set_bit(HCI_MGMT, &d->flags);
152
153 if (test_bit(HCI_SETUP, &d->flags)) 151 if (test_bit(HCI_SETUP, &d->flags))
154 continue; 152 continue;
155 153
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 38b618c96de6..4e32e18211f9 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1802,6 +1802,11 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
1802 continue; 1802 continue;
1803 } 1803 }
1804 1804
1805 if (test_bit(RFCOMM_ENC_DROP, &d->flags)) {
1806 __rfcomm_dlc_close(d, ECONNREFUSED);
1807 continue;
1808 }
1809
1805 if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) { 1810 if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) {
1806 rfcomm_dlc_clear_timer(d); 1811 rfcomm_dlc_clear_timer(d);
1807 if (d->out) { 1812 if (d->out) {
@@ -2077,7 +2082,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
2077 if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) { 2082 if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) {
2078 rfcomm_dlc_clear_timer(d); 2083 rfcomm_dlc_clear_timer(d);
2079 if (status || encrypt == 0x00) { 2084 if (status || encrypt == 0x00) {
2080 __rfcomm_dlc_close(d, ECONNREFUSED); 2085 set_bit(RFCOMM_ENC_DROP, &d->flags);
2081 continue; 2086 continue;
2082 } 2087 }
2083 } 2088 }
@@ -2088,7 +2093,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
2088 rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); 2093 rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
2089 continue; 2094 continue;
2090 } else if (d->sec_level == BT_SECURITY_HIGH) { 2095 } else if (d->sec_level == BT_SECURITY_HIGH) {
2091 __rfcomm_dlc_close(d, ECONNREFUSED); 2096 set_bit(RFCOMM_ENC_DROP, &d->flags);
2092 continue; 2097 continue;
2093 } 2098 }
2094 } 2099 }
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index ebd7fb101fbf..d06c65fa5526 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -832,6 +832,12 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
832 if (is_multicast_ether_addr(mac)) 832 if (is_multicast_ether_addr(mac))
833 return -EINVAL; 833 return -EINVAL;
834 834
835 /* Only TDLS-supporting stations can add TDLS peers */
836 if ((params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
837 !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
838 sdata->vif.type == NL80211_IFTYPE_STATION))
839 return -ENOTSUPP;
840
835 sta = sta_info_alloc(sdata, mac, GFP_KERNEL); 841 sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
836 if (!sta) 842 if (!sta)
837 return -ENOMEM; 843 return -ENOMEM;
@@ -841,12 +847,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
841 847
842 sta_apply_parameters(local, sta, params); 848 sta_apply_parameters(local, sta, params);
843 849
844 /* Only TDLS-supporting stations can add TDLS peers */
845 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
846 !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
847 sdata->vif.type == NL80211_IFTYPE_STATION))
848 return -ENOTSUPP;
849
850 rate_control_rate_init(sta); 850 rate_control_rate_init(sta);
851 851
852 layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 852 layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 4c3d1f591bec..ea10a51babda 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -389,6 +389,7 @@ struct ieee80211_if_managed {
389 389
390 unsigned long timers_running; /* used for quiesce/restart */ 390 unsigned long timers_running; /* used for quiesce/restart */
391 bool powersave; /* powersave requested for this iface */ 391 bool powersave; /* powersave requested for this iface */
392 bool broken_ap; /* AP is broken -- turn off powersave */
392 enum ieee80211_smps_mode req_smps, /* requested smps mode */ 393 enum ieee80211_smps_mode req_smps, /* requested smps mode */
393 ap_smps, /* smps mode AP thinks we're in */ 394 ap_smps, /* smps mode AP thinks we're in */
394 driver_smps_mode; /* smps mode request */ 395 driver_smps_mode; /* smps mode request */
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 96f9fae32495..72c8bea81a6c 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -639,6 +639,9 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
639 if (!mgd->powersave) 639 if (!mgd->powersave)
640 return false; 640 return false;
641 641
642 if (mgd->broken_ap)
643 return false;
644
642 if (!mgd->associated) 645 if (!mgd->associated)
643 return false; 646 return false;
644 647
@@ -1491,10 +1494,21 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1491 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); 1494 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
1492 1495
1493 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) 1496 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
1494 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " 1497 printk(KERN_DEBUG
1495 "set\n", sdata->name, aid); 1498 "%s: invalid AID value 0x%x; bits 15:14 not set\n",
1499 sdata->name, aid);
1496 aid &= ~(BIT(15) | BIT(14)); 1500 aid &= ~(BIT(15) | BIT(14));
1497 1501
1502 ifmgd->broken_ap = false;
1503
1504 if (aid == 0 || aid > IEEE80211_MAX_AID) {
1505 printk(KERN_DEBUG
1506 "%s: invalid AID value %d (out of range), turn off PS\n",
1507 sdata->name, aid);
1508 aid = 0;
1509 ifmgd->broken_ap = true;
1510 }
1511
1498 pos = mgmt->u.assoc_resp.variable; 1512 pos = mgmt->u.assoc_resp.variable;
1499 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); 1513 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1500 1514
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 94472eb34d76..6c53b6d1002b 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -1084,14 +1084,13 @@ static void ieee80211_work_work(struct work_struct *work)
1084 continue; 1084 continue;
1085 if (wk->chan != local->tmp_channel) 1085 if (wk->chan != local->tmp_channel)
1086 continue; 1086 continue;
1087 if (ieee80211_work_ct_coexists(wk->chan_type, 1087 if (!ieee80211_work_ct_coexists(wk->chan_type,
1088 local->tmp_channel_type)) 1088 local->tmp_channel_type))
1089 continue; 1089 continue;
1090 remain_off_channel = true; 1090 remain_off_channel = true;
1091 } 1091 }
1092 1092
1093 if (!remain_off_channel && local->tmp_channel) { 1093 if (!remain_off_channel && local->tmp_channel) {
1094 bool on_oper_chan = ieee80211_cfg_on_oper_channel(local);
1095 local->tmp_channel = NULL; 1094 local->tmp_channel = NULL;
1096 /* If tmp_channel wasn't operating channel, then 1095 /* If tmp_channel wasn't operating channel, then
1097 * we need to go back on-channel. 1096 * we need to go back on-channel.
@@ -1101,7 +1100,7 @@ static void ieee80211_work_work(struct work_struct *work)
1101 * we still need to do a hardware config. Currently, 1100 * we still need to do a hardware config. Currently,
1102 * we cannot be here while scanning, however. 1101 * we cannot be here while scanning, however.
1103 */ 1102 */
1104 if (ieee80211_cfg_on_oper_channel(local) && !on_oper_chan) 1103 if (!ieee80211_cfg_on_oper_channel(local))
1105 ieee80211_hw_config(local, 0); 1104 ieee80211_hw_config(local, 0);
1106 1105
1107 /* At the least, we need to disable offchannel_ps, 1106 /* At the least, we need to disable offchannel_ps,
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
index f346395314ba..c43612ee96bb 100644
--- a/net/wanrouter/wanproc.c
+++ b/net/wanrouter/wanproc.c
@@ -81,7 +81,6 @@ static struct proc_dir_entry *proc_router;
81 * Iterator 81 * Iterator
82 */ 82 */
83static void *r_start(struct seq_file *m, loff_t *pos) 83static void *r_start(struct seq_file *m, loff_t *pos)
84 __acquires(kernel_lock)
85{ 84{
86 struct wan_device *wandev; 85 struct wan_device *wandev;
87 loff_t l = *pos; 86 loff_t l = *pos;
@@ -103,7 +102,6 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
103} 102}
104 103
105static void r_stop(struct seq_file *m, void *v) 104static void r_stop(struct seq_file *m, void *v)
106 __releases(kernel_lock)
107{ 105{
108 mutex_unlock(&config_mutex); 106 mutex_unlock(&config_mutex);
109} 107}
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 5dbab38d04af..130cfe677d60 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -52,6 +52,7 @@ struct link_slave {
52 struct link_ctl_info info; 52 struct link_ctl_info info;
53 int vals[2]; /* current values */ 53 int vals[2]; /* current values */
54 unsigned int flags; 54 unsigned int flags;
55 struct snd_kcontrol *kctl; /* original kcontrol pointer */
55 struct snd_kcontrol slave; /* the copy of original control entry */ 56 struct snd_kcontrol slave; /* the copy of original control entry */
56}; 57};
57 58
@@ -252,6 +253,7 @@ int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave,
252 slave->count * sizeof(*slave->vd), GFP_KERNEL); 253 slave->count * sizeof(*slave->vd), GFP_KERNEL);
253 if (!srec) 254 if (!srec)
254 return -ENOMEM; 255 return -ENOMEM;
256 srec->kctl = slave;
255 srec->slave = *slave; 257 srec->slave = *slave;
256 memcpy(srec->slave.vd, slave->vd, slave->count * sizeof(*slave->vd)); 258 memcpy(srec->slave.vd, slave->vd, slave->count * sizeof(*slave->vd));
257 srec->master = master_link; 259 srec->master = master_link;
@@ -333,10 +335,18 @@ static int master_put(struct snd_kcontrol *kcontrol,
333static void master_free(struct snd_kcontrol *kcontrol) 335static void master_free(struct snd_kcontrol *kcontrol)
334{ 336{
335 struct link_master *master = snd_kcontrol_chip(kcontrol); 337 struct link_master *master = snd_kcontrol_chip(kcontrol);
336 struct link_slave *slave; 338 struct link_slave *slave, *n;
337 339
338 list_for_each_entry(slave, &master->slaves, list) 340 /* free all slave links and retore the original slave kctls */
339 slave->master = NULL; 341 list_for_each_entry_safe(slave, n, &master->slaves, list) {
342 struct snd_kcontrol *sctl = slave->kctl;
343 struct list_head olist = sctl->list;
344 memcpy(sctl, &slave->slave, sizeof(*sctl));
345 memcpy(sctl->vd, slave->slave.vd,
346 sctl->count * sizeof(*sctl->vd));
347 sctl->list = olist; /* keep the current linked-list */
348 kfree(slave);
349 }
340 kfree(master); 350 kfree(master);
341} 351}
342 352
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 916a1863af73..e44b107fdc75 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2331,6 +2331,39 @@ int snd_hda_codec_reset(struct hda_codec *codec)
2331 return 0; 2331 return 0;
2332} 2332}
2333 2333
2334typedef int (*map_slave_func_t)(void *, struct snd_kcontrol *);
2335
2336/* apply the function to all matching slave ctls in the mixer list */
2337static int map_slaves(struct hda_codec *codec, const char * const *slaves,
2338 map_slave_func_t func, void *data)
2339{
2340 struct hda_nid_item *items;
2341 const char * const *s;
2342 int i, err;
2343
2344 items = codec->mixers.list;
2345 for (i = 0; i < codec->mixers.used; i++) {
2346 struct snd_kcontrol *sctl = items[i].kctl;
2347 if (!sctl || !sctl->id.name ||
2348 sctl->id.iface != SNDRV_CTL_ELEM_IFACE_MIXER)
2349 continue;
2350 for (s = slaves; *s; s++) {
2351 if (!strcmp(sctl->id.name, *s)) {
2352 err = func(data, sctl);
2353 if (err)
2354 return err;
2355 break;
2356 }
2357 }
2358 }
2359 return 0;
2360}
2361
2362static int check_slave_present(void *data, struct snd_kcontrol *sctl)
2363{
2364 return 1;
2365}
2366
2334/** 2367/**
2335 * snd_hda_add_vmaster - create a virtual master control and add slaves 2368 * snd_hda_add_vmaster - create a virtual master control and add slaves
2336 * @codec: HD-audio codec 2369 * @codec: HD-audio codec
@@ -2351,12 +2384,10 @@ int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
2351 unsigned int *tlv, const char * const *slaves) 2384 unsigned int *tlv, const char * const *slaves)
2352{ 2385{
2353 struct snd_kcontrol *kctl; 2386 struct snd_kcontrol *kctl;
2354 const char * const *s;
2355 int err; 2387 int err;
2356 2388
2357 for (s = slaves; *s && !snd_hda_find_mixer_ctl(codec, *s); s++) 2389 err = map_slaves(codec, slaves, check_slave_present, NULL);
2358 ; 2390 if (err != 1) {
2359 if (!*s) {
2360 snd_printdd("No slave found for %s\n", name); 2391 snd_printdd("No slave found for %s\n", name);
2361 return 0; 2392 return 0;
2362 } 2393 }
@@ -2367,23 +2398,10 @@ int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
2367 if (err < 0) 2398 if (err < 0)
2368 return err; 2399 return err;
2369 2400
2370 for (s = slaves; *s; s++) { 2401 err = map_slaves(codec, slaves, (map_slave_func_t)snd_ctl_add_slave,
2371 struct snd_kcontrol *sctl; 2402 kctl);
2372 int i = 0; 2403 if (err < 0)
2373 for (;;) { 2404 return err;
2374 sctl = _snd_hda_find_mixer_ctl(codec, *s, i);
2375 if (!sctl) {
2376 if (!i)
2377 snd_printdd("Cannot find slave %s, "
2378 "skipped\n", *s);
2379 break;
2380 }
2381 err = snd_ctl_add_slave(kctl, sctl);
2382 if (err < 0)
2383 return err;
2384 i++;
2385 }
2386 }
2387 return 0; 2405 return 0;
2388} 2406}
2389EXPORT_SYMBOL_HDA(snd_hda_add_vmaster); 2407EXPORT_SYMBOL_HDA(snd_hda_add_vmaster);
@@ -4752,6 +4770,7 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
4752 memset(sequences_hp, 0, sizeof(sequences_hp)); 4770 memset(sequences_hp, 0, sizeof(sequences_hp));
4753 assoc_line_out = 0; 4771 assoc_line_out = 0;
4754 4772
4773 codec->ignore_misc_bit = true;
4755 end_nid = codec->start_nid + codec->num_nodes; 4774 end_nid = codec->start_nid + codec->num_nodes;
4756 for (nid = codec->start_nid; nid < end_nid; nid++) { 4775 for (nid = codec->start_nid; nid < end_nid; nid++) {
4757 unsigned int wid_caps = get_wcaps(codec, nid); 4776 unsigned int wid_caps = get_wcaps(codec, nid);
@@ -4767,6 +4786,9 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
4767 continue; 4786 continue;
4768 4787
4769 def_conf = snd_hda_codec_get_pincfg(codec, nid); 4788 def_conf = snd_hda_codec_get_pincfg(codec, nid);
4789 if (!(get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid)) &
4790 AC_DEFCFG_MISC_NO_PRESENCE))
4791 codec->ignore_misc_bit = false;
4770 conn = get_defcfg_connect(def_conf); 4792 conn = get_defcfg_connect(def_conf);
4771 if (conn == AC_JACK_PORT_NONE) 4793 if (conn == AC_JACK_PORT_NONE)
4772 continue; 4794 continue;
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 755f2b0f9d8e..564471169cae 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -854,6 +854,7 @@ struct hda_codec {
854 unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */ 854 unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */
855 unsigned int pins_shutup:1; /* pins are shut up */ 855 unsigned int pins_shutup:1; /* pins are shut up */
856 unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */ 856 unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
857 unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */
857#ifdef CONFIG_SND_HDA_POWER_SAVE 858#ifdef CONFIG_SND_HDA_POWER_SAVE
858 unsigned int power_on :1; /* current (global) power-state */ 859 unsigned int power_on :1; /* current (global) power-state */
859 unsigned int power_transition :1; /* power-state in transition */ 860 unsigned int power_transition :1; /* power-state in transition */
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index dcbea0da0fa2..6579e0f2bb57 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -510,13 +510,15 @@ int snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid);
510 510
511static inline bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid) 511static inline bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid)
512{ 512{
513 return (snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_PRES_DETECT) && 513 if (!(snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_PRES_DETECT))
514 /* disable MISC_NO_PRESENCE check because it may break too 514 return false;
515 * many devices 515 if (!codec->ignore_misc_bit &&
516 */ 516 (get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid)) &
517 /*(get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid) & 517 AC_DEFCFG_MISC_NO_PRESENCE))
518 AC_DEFCFG_MISC_NO_PRESENCE)) &&*/ 518 return false;
519 (get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP); 519 if (!(get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP))
520 return false;
521 return true;
520} 522}
521 523
522/* flags for hda_nid_item */ 524/* flags for hda_nid_item */
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 5e706e4d1737..0de21193a2b0 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3062,7 +3062,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3062 SND_PCI_QUIRK(0x1043, 0x1993, "Asus U50F", CXT5066_ASUS), 3062 SND_PCI_QUIRK(0x1043, 0x1993, "Asus U50F", CXT5066_ASUS),
3063 SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD), 3063 SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
3064 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), 3064 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
3065 SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
3066 SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board", 3065 SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board",
3067 CXT5066_LAPTOP), 3066 CXT5066_LAPTOP),
3068 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5), 3067 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a24e068a021b..308bb575bc06 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -284,7 +284,7 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
284 struct alc_spec *spec = codec->spec; 284 struct alc_spec *spec = codec->spec;
285 const struct hda_input_mux *imux; 285 const struct hda_input_mux *imux;
286 unsigned int mux_idx; 286 unsigned int mux_idx;
287 int i, type; 287 int i, type, num_conns;
288 hda_nid_t nid; 288 hda_nid_t nid;
289 289
290 mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx; 290 mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
@@ -307,16 +307,17 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
307 spec->capsrc_nids[adc_idx] : spec->adc_nids[adc_idx]; 307 spec->capsrc_nids[adc_idx] : spec->adc_nids[adc_idx];
308 308
309 /* no selection? */ 309 /* no selection? */
310 if (snd_hda_get_conn_list(codec, nid, NULL) <= 1) 310 num_conns = snd_hda_get_conn_list(codec, nid, NULL);
311 if (num_conns <= 1)
311 return 1; 312 return 1;
312 313
313 type = get_wcaps_type(get_wcaps(codec, nid)); 314 type = get_wcaps_type(get_wcaps(codec, nid));
314 if (type == AC_WID_AUD_MIX) { 315 if (type == AC_WID_AUD_MIX) {
315 /* Matrix-mixer style (e.g. ALC882) */ 316 /* Matrix-mixer style (e.g. ALC882) */
316 for (i = 0; i < imux->num_items; i++) { 317 int active = imux->items[idx].index;
317 unsigned int v = (i == idx) ? 0 : HDA_AMP_MUTE; 318 for (i = 0; i < num_conns; i++) {
318 snd_hda_codec_amp_stereo(codec, nid, HDA_INPUT, 319 unsigned int v = (i == active) ? 0 : HDA_AMP_MUTE;
319 imux->items[i].index, 320 snd_hda_codec_amp_stereo(codec, nid, HDA_INPUT, i,
320 HDA_AMP_MUTE, v); 321 HDA_AMP_MUTE, v);
321 } 322 }
322 } else { 323 } else {
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 4e715fefebef..edc2b7bc177c 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -95,6 +95,7 @@ enum {
95 STAC_92HD83XXX_REF, 95 STAC_92HD83XXX_REF,
96 STAC_92HD83XXX_PWR_REF, 96 STAC_92HD83XXX_PWR_REF,
97 STAC_DELL_S14, 97 STAC_DELL_S14,
98 STAC_DELL_VOSTRO_3500,
98 STAC_92HD83XXX_HP, 99 STAC_92HD83XXX_HP,
99 STAC_92HD83XXX_HP_cNB11_INTQUAD, 100 STAC_92HD83XXX_HP_cNB11_INTQUAD,
100 STAC_HP_DV7_4000, 101 STAC_HP_DV7_4000,
@@ -1659,6 +1660,12 @@ static const unsigned int dell_s14_pin_configs[10] = {
1659 0x40f000f0, 0x40f000f0, 1660 0x40f000f0, 0x40f000f0,
1660}; 1661};
1661 1662
1663static const unsigned int dell_vostro_3500_pin_configs[10] = {
1664 0x02a11020, 0x0221101f, 0x400000f0, 0x90170110,
1665 0x400000f1, 0x400000f2, 0x400000f3, 0x90a60160,
1666 0x400000f4, 0x400000f5,
1667};
1668
1662static const unsigned int hp_dv7_4000_pin_configs[10] = { 1669static const unsigned int hp_dv7_4000_pin_configs[10] = {
1663 0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110, 1670 0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110,
1664 0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140, 1671 0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140,
@@ -1675,6 +1682,7 @@ static const unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = {
1675 [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs, 1682 [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs,
1676 [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs, 1683 [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs,
1677 [STAC_DELL_S14] = dell_s14_pin_configs, 1684 [STAC_DELL_S14] = dell_s14_pin_configs,
1685 [STAC_DELL_VOSTRO_3500] = dell_vostro_3500_pin_configs,
1678 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = hp_cNB11_intquad_pin_configs, 1686 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = hp_cNB11_intquad_pin_configs,
1679 [STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs, 1687 [STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs,
1680}; 1688};
@@ -1684,6 +1692,7 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
1684 [STAC_92HD83XXX_REF] = "ref", 1692 [STAC_92HD83XXX_REF] = "ref",
1685 [STAC_92HD83XXX_PWR_REF] = "mic-ref", 1693 [STAC_92HD83XXX_PWR_REF] = "mic-ref",
1686 [STAC_DELL_S14] = "dell-s14", 1694 [STAC_DELL_S14] = "dell-s14",
1695 [STAC_DELL_VOSTRO_3500] = "dell-vostro-3500",
1687 [STAC_92HD83XXX_HP] = "hp", 1696 [STAC_92HD83XXX_HP] = "hp",
1688 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad", 1697 [STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad",
1689 [STAC_HP_DV7_4000] = "hp-dv7-4000", 1698 [STAC_HP_DV7_4000] = "hp-dv7-4000",
@@ -1697,6 +1706,8 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
1697 "DFI LanParty", STAC_92HD83XXX_REF), 1706 "DFI LanParty", STAC_92HD83XXX_REF),
1698 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba, 1707 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba,
1699 "unknown Dell", STAC_DELL_S14), 1708 "unknown Dell", STAC_DELL_S14),
1709 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x1028,
1710 "Dell Vostro 3500", STAC_DELL_VOSTRO_3500),
1700 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600, 1711 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600,
1701 "HP", STAC_92HD83XXX_HP), 1712 "HP", STAC_92HD83XXX_HP),
1702 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1656, 1713 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1656,
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 29e312597f20..11718b49b2e2 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -1077,6 +1077,13 @@ static snd_pcm_uframes_t snd_intel8x0_pcm_pointer(struct snd_pcm_substream *subs
1077 } 1077 }
1078 if (civ != igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV)) 1078 if (civ != igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV))
1079 continue; 1079 continue;
1080
1081 /* IO read operation is very expensive inside virtual machine
1082 * as it is emulated. The probability that subsequent PICB read
1083 * will return different result is high enough to loop till
1084 * timeout here.
1085 * Same CIV is strict enough condition to be sure that PICB
1086 * is valid inside VM on emulated card. */
1080 if (chip->inside_vm) 1087 if (chip->inside_vm)
1081 break; 1088 break;
1082 if (ptr1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb)) 1089 if (ptr1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb))
@@ -2930,6 +2937,45 @@ static unsigned int sis_codec_bits[3] = {
2930 ICH_PCR, ICH_SCR, ICH_SIS_TCR 2937 ICH_PCR, ICH_SCR, ICH_SIS_TCR
2931}; 2938};
2932 2939
2940static int __devinit snd_intel8x0_inside_vm(struct pci_dev *pci)
2941{
2942 int result = inside_vm;
2943 char *msg = NULL;
2944
2945 /* check module parameter first (override detection) */
2946 if (result >= 0) {
2947 msg = result ? "enable (forced) VM" : "disable (forced) VM";
2948 goto fini;
2949 }
2950
2951 /* detect KVM and Parallels virtual environments */
2952 result = kvm_para_available();
2953#ifdef X86_FEATURE_HYPERVISOR
2954 result = result || boot_cpu_has(X86_FEATURE_HYPERVISOR);
2955#endif
2956 if (!result)
2957 goto fini;
2958
2959 /* check for known (emulated) devices */
2960 if (pci->subsystem_vendor == 0x1af4 &&
2961 pci->subsystem_device == 0x1100) {
2962 /* KVM emulated sound, PCI SSID: 1af4:1100 */
2963 msg = "enable KVM";
2964 } else if (pci->subsystem_vendor == 0x1ab8) {
2965 /* Parallels VM emulated sound, PCI SSID: 1ab8:xxxx */
2966 msg = "enable Parallels VM";
2967 } else {
2968 msg = "disable (unknown or VT-d) VM";
2969 result = 0;
2970 }
2971
2972fini:
2973 if (msg != NULL)
2974 printk(KERN_INFO "intel8x0: %s optimization\n", msg);
2975
2976 return result;
2977}
2978
2933static int __devinit snd_intel8x0_create(struct snd_card *card, 2979static int __devinit snd_intel8x0_create(struct snd_card *card,
2934 struct pci_dev *pci, 2980 struct pci_dev *pci,
2935 unsigned long device_type, 2981 unsigned long device_type,
@@ -2997,9 +3043,7 @@ static int __devinit snd_intel8x0_create(struct snd_card *card,
2997 if (xbox) 3043 if (xbox)
2998 chip->xbox = 1; 3044 chip->xbox = 1;
2999 3045
3000 chip->inside_vm = inside_vm; 3046 chip->inside_vm = snd_intel8x0_inside_vm(pci);
3001 if (inside_vm)
3002 printk(KERN_INFO "intel8x0: enable KVM optimization\n");
3003 3047
3004 if (pci->vendor == PCI_VENDOR_ID_INTEL && 3048 if (pci->vendor == PCI_VENDOR_ID_INTEL &&
3005 pci->device == PCI_DEVICE_ID_INTEL_440MX) 3049 pci->device == PCI_DEVICE_ID_INTEL_440MX)
@@ -3243,14 +3287,6 @@ static int __devinit snd_intel8x0_probe(struct pci_dev *pci,
3243 buggy_irq = 0; 3287 buggy_irq = 0;
3244 } 3288 }
3245 3289
3246 if (inside_vm < 0) {
3247 /* detect KVM and Parallels virtual environments */
3248 inside_vm = kvm_para_available();
3249#if defined(__i386__) || defined(__x86_64__)
3250 inside_vm = inside_vm || boot_cpu_has(X86_FEATURE_HYPERVISOR);
3251#endif
3252 }
3253
3254 if ((err = snd_intel8x0_create(card, pci, pci_id->driver_data, 3290 if ((err = snd_intel8x0_create(card, pci, pci_id->driver_data,
3255 &chip)) < 0) { 3291 &chip)) < 0) {
3256 snd_card_free(card); 3292 snd_card_free(card);
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index a3ce1b22620d..1aa52eff526a 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -876,7 +876,7 @@ static void __devinit snd_ps3_audio_set_base_addr(uint64_t ioaddr_start)
876 (0x0fUL << 12) | 876 (0x0fUL << 12) |
877 (PS3_AUDIO_IOID); 877 (PS3_AUDIO_IOID);
878 878
879 ret = lv1_gpu_attribute(0x100, 0x007, val, 0, 0); 879 ret = lv1_gpu_attribute(0x100, 0x007, val);
880 if (ret) 880 if (ret)
881 pr_info("%s: gpu_attribute failed %d\n", __func__, 881 pr_info("%s: gpu_attribute failed %d\n", __func__,
882 ret); 882 ret);
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 6b73efd26991..9c982e47eb99 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -56,7 +56,7 @@ static int wm8994_retune_mobile_base[] = {
56static int wm8994_readable(struct snd_soc_codec *codec, unsigned int reg) 56static int wm8994_readable(struct snd_soc_codec *codec, unsigned int reg)
57{ 57{
58 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); 58 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
59 struct wm8994 *control = wm8994->control_data; 59 struct wm8994 *control = codec->control_data;
60 60
61 switch (reg) { 61 switch (reg) {
62 case WM8994_GPIO_1: 62 case WM8994_GPIO_1:
@@ -3030,19 +3030,34 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
3030{ 3030{
3031 struct wm8994_priv *wm8994 = data; 3031 struct wm8994_priv *wm8994 = data;
3032 struct snd_soc_codec *codec = wm8994->codec; 3032 struct snd_soc_codec *codec = wm8994->codec;
3033 int reg; 3033 int reg, count;
3034 3034
3035 reg = snd_soc_read(codec, WM8958_MIC_DETECT_3); 3035 /* We may occasionally read a detection without an impedence
3036 if (reg < 0) { 3036 * range being provided - if that happens loop again.
3037 dev_err(codec->dev, "Failed to read mic detect status: %d\n", 3037 */
3038 reg); 3038 count = 10;
3039 return IRQ_NONE; 3039 do {
3040 } 3040 reg = snd_soc_read(codec, WM8958_MIC_DETECT_3);
3041 if (reg < 0) {
3042 dev_err(codec->dev,
3043 "Failed to read mic detect status: %d\n",
3044 reg);
3045 return IRQ_NONE;
3046 }
3041 3047
3042 if (!(reg & WM8958_MICD_VALID)) { 3048 if (!(reg & WM8958_MICD_VALID)) {
3043 dev_dbg(codec->dev, "Mic detect data not valid\n"); 3049 dev_dbg(codec->dev, "Mic detect data not valid\n");
3044 goto out; 3050 goto out;
3045 } 3051 }
3052
3053 if (!(reg & WM8958_MICD_STS) || (reg & WM8958_MICD_LVL_MASK))
3054 break;
3055
3056 msleep(1);
3057 } while (count--);
3058
3059 if (count == 0)
3060 dev_warn(codec->dev, "No impedence range reported for jack\n");
3046 3061
3047#ifndef CONFIG_SND_SOC_WM8994_MODULE 3062#ifndef CONFIG_SND_SOC_WM8994_MODULE
3048 trace_snd_soc_jack_irq(dev_name(codec->dev)); 3063 trace_snd_soc_jack_irq(dev_name(codec->dev));
@@ -3180,9 +3195,9 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
3180 3195
3181 wm8994_request_irq(codec->control_data, WM8994_IRQ_FIFOS_ERR, 3196 wm8994_request_irq(codec->control_data, WM8994_IRQ_FIFOS_ERR,
3182 wm8994_fifo_error, "FIFO error", codec); 3197 wm8994_fifo_error, "FIFO error", codec);
3183 wm8994_request_irq(wm8994->control_data, WM8994_IRQ_TEMP_WARN, 3198 wm8994_request_irq(codec->control_data, WM8994_IRQ_TEMP_WARN,
3184 wm8994_temp_warn, "Thermal warning", codec); 3199 wm8994_temp_warn, "Thermal warning", codec);
3185 wm8994_request_irq(wm8994->control_data, WM8994_IRQ_TEMP_SHUT, 3200 wm8994_request_irq(codec->control_data, WM8994_IRQ_TEMP_SHUT,
3186 wm8994_temp_shut, "Thermal shutdown", codec); 3201 wm8994_temp_shut, "Thermal shutdown", codec);
3187 3202
3188 ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_DCS_DONE, 3203 ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_DCS_DONE,
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 60f65ace7474..ab23869c01bb 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -765,10 +765,61 @@ static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
765 * interface to ALSA control for feature/mixer units 765 * interface to ALSA control for feature/mixer units
766 */ 766 */
767 767
768/* volume control quirks */
769static void volume_control_quirks(struct usb_mixer_elem_info *cval,
770 struct snd_kcontrol *kctl)
771{
772 switch (cval->mixer->chip->usb_id) {
773 case USB_ID(0x0471, 0x0101):
774 case USB_ID(0x0471, 0x0104):
775 case USB_ID(0x0471, 0x0105):
776 case USB_ID(0x0672, 0x1041):
777 /* quirk for UDA1321/N101.
778 * note that detection between firmware 2.1.1.7 (N101)
779 * and later 2.1.1.21 is not very clear from datasheets.
780 * I hope that the min value is -15360 for newer firmware --jk
781 */
782 if (!strcmp(kctl->id.name, "PCM Playback Volume") &&
783 cval->min == -15616) {
784 snd_printk(KERN_INFO
785 "set volume quirk for UDA1321/N101 chip\n");
786 cval->max = -256;
787 }
788 break;
789
790 case USB_ID(0x046d, 0x09a4):
791 if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
792 snd_printk(KERN_INFO
793 "set volume quirk for QuickCam E3500\n");
794 cval->min = 6080;
795 cval->max = 8768;
796 cval->res = 192;
797 }
798 break;
799
800 case USB_ID(0x046d, 0x0808):
801 case USB_ID(0x046d, 0x0809):
802 case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
803 case USB_ID(0x046d, 0x0991):
804 /* Most audio usb devices lie about volume resolution.
805 * Most Logitech webcams have res = 384.
806 * Proboly there is some logitech magic behind this number --fishor
807 */
808 if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
809 snd_printk(KERN_INFO
810 "set resolution quirk: cval->res = 384\n");
811 cval->res = 384;
812 }
813 break;
814
815 }
816}
817
768/* 818/*
769 * retrieve the minimum and maximum values for the specified control 819 * retrieve the minimum and maximum values for the specified control
770 */ 820 */
771static int get_min_max(struct usb_mixer_elem_info *cval, int default_min) 821static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
822 int default_min, struct snd_kcontrol *kctl)
772{ 823{
773 /* for failsafe */ 824 /* for failsafe */
774 cval->min = default_min; 825 cval->min = default_min;
@@ -844,6 +895,9 @@ static int get_min_max(struct usb_mixer_elem_info *cval, int default_min)
844 cval->initialized = 1; 895 cval->initialized = 1;
845 } 896 }
846 897
898 if (kctl)
899 volume_control_quirks(cval, kctl);
900
847 /* USB descriptions contain the dB scale in 1/256 dB unit 901 /* USB descriptions contain the dB scale in 1/256 dB unit
848 * while ALSA TLV contains in 1/100 dB unit 902 * while ALSA TLV contains in 1/100 dB unit
849 */ 903 */
@@ -864,6 +918,7 @@ static int get_min_max(struct usb_mixer_elem_info *cval, int default_min)
864 return 0; 918 return 0;
865} 919}
866 920
921#define get_min_max(cval, def) get_min_max_with_quirks(cval, def, NULL)
867 922
868/* get a feature/mixer unit info */ 923/* get a feature/mixer unit info */
869static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) 924static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
@@ -882,7 +937,7 @@ static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_
882 uinfo->value.integer.max = 1; 937 uinfo->value.integer.max = 1;
883 } else { 938 } else {
884 if (!cval->initialized) { 939 if (!cval->initialized) {
885 get_min_max(cval, 0); 940 get_min_max_with_quirks(cval, 0, kcontrol);
886 if (cval->initialized && cval->dBmin >= cval->dBmax) { 941 if (cval->initialized && cval->dBmin >= cval->dBmax) {
887 kcontrol->vd[0].access &= 942 kcontrol->vd[0].access &=
888 ~(SNDRV_CTL_ELEM_ACCESS_TLV_READ | 943 ~(SNDRV_CTL_ELEM_ACCESS_TLV_READ |
@@ -1045,9 +1100,6 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
1045 cval->ch_readonly = readonly_mask; 1100 cval->ch_readonly = readonly_mask;
1046 } 1101 }
1047 1102
1048 /* get min/max values */
1049 get_min_max(cval, 0);
1050
1051 /* if all channels in the mask are marked read-only, make the control 1103 /* if all channels in the mask are marked read-only, make the control
1052 * read-only. set_cur_mix_value() will check the mask again and won't 1104 * read-only. set_cur_mix_value() will check the mask again and won't
1053 * issue write commands to read-only channels. */ 1105 * issue write commands to read-only channels. */
@@ -1069,6 +1121,9 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
1069 len = snd_usb_copy_string_desc(state, nameid, 1121 len = snd_usb_copy_string_desc(state, nameid,
1070 kctl->id.name, sizeof(kctl->id.name)); 1122 kctl->id.name, sizeof(kctl->id.name));
1071 1123
1124 /* get min/max values */
1125 get_min_max_with_quirks(cval, 0, kctl);
1126
1072 switch (control) { 1127 switch (control) {
1073 case UAC_FU_MUTE: 1128 case UAC_FU_MUTE:
1074 case UAC_FU_VOLUME: 1129 case UAC_FU_VOLUME:
@@ -1118,51 +1173,6 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
1118 break; 1173 break;
1119 } 1174 }
1120 1175
1121 /* volume control quirks */
1122 switch (state->chip->usb_id) {
1123 case USB_ID(0x0471, 0x0101):
1124 case USB_ID(0x0471, 0x0104):
1125 case USB_ID(0x0471, 0x0105):
1126 case USB_ID(0x0672, 0x1041):
1127 /* quirk for UDA1321/N101.
1128 * note that detection between firmware 2.1.1.7 (N101)
1129 * and later 2.1.1.21 is not very clear from datasheets.
1130 * I hope that the min value is -15360 for newer firmware --jk
1131 */
1132 if (!strcmp(kctl->id.name, "PCM Playback Volume") &&
1133 cval->min == -15616) {
1134 snd_printk(KERN_INFO
1135 "set volume quirk for UDA1321/N101 chip\n");
1136 cval->max = -256;
1137 }
1138 break;
1139
1140 case USB_ID(0x046d, 0x09a4):
1141 if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
1142 snd_printk(KERN_INFO
1143 "set volume quirk for QuickCam E3500\n");
1144 cval->min = 6080;
1145 cval->max = 8768;
1146 cval->res = 192;
1147 }
1148 break;
1149
1150 case USB_ID(0x046d, 0x0808):
1151 case USB_ID(0x046d, 0x0809):
1152 case USB_ID(0x046d, 0x0991):
1153 /* Most audio usb devices lie about volume resolution.
1154 * Most Logitech webcams have res = 384.
1155 * Proboly there is some logitech magic behind this number --fishor
1156 */
1157 if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
1158 snd_printk(KERN_INFO
1159 "set resolution quirk: cval->res = 384\n");
1160 cval->res = 384;
1161 }
1162 break;
1163
1164 }
1165
1166 range = (cval->max - cval->min) / cval->res; 1176 range = (cval->max - cval->min) / cval->res;
1167 /* Are there devices with volume range more than 255? I use a bit more 1177 /* Are there devices with volume range more than 255? I use a bit more
1168 * to be sure. 384 is a resolution magic number found on Logitech 1178 * to be sure. 384 is a resolution magic number found on Logitech
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 2e5bc7344026..a3ddac0deffd 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -137,12 +137,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
137 return -ENOMEM; 137 return -ENOMEM;
138 } 138 }
139 if (fp->nr_rates > 0) { 139 if (fp->nr_rates > 0) {
140 rate_table = kmalloc(sizeof(int) * fp->nr_rates, GFP_KERNEL); 140 rate_table = kmemdup(fp->rate_table,
141 sizeof(int) * fp->nr_rates, GFP_KERNEL);
141 if (!rate_table) { 142 if (!rate_table) {
142 kfree(fp); 143 kfree(fp);
143 return -ENOMEM; 144 return -ENOMEM;
144 } 145 }
145 memcpy(rate_table, fp->rate_table, sizeof(int) * fp->nr_rates);
146 fp->rate_table = rate_table; 146 fp->rate_table = rate_table;
147 } 147 }
148 148
@@ -224,10 +224,9 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
224 if (altsd->bNumEndpoints != 1) 224 if (altsd->bNumEndpoints != 1)
225 return -ENXIO; 225 return -ENXIO;
226 226
227 fp = kmalloc(sizeof(*fp), GFP_KERNEL); 227 fp = kmemdup(&ua_format, sizeof(*fp), GFP_KERNEL);
228 if (!fp) 228 if (!fp)
229 return -ENOMEM; 229 return -ENOMEM;
230 memcpy(fp, &ua_format, sizeof(*fp));
231 230
232 fp->iface = altsd->bInterfaceNumber; 231 fp->iface = altsd->bInterfaceNumber;
233 fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress; 232 fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f82480fa7f27..6ab58cc99d53 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -262,13 +262,16 @@ static bool perf_evlist__equal(struct perf_evlist *evlist,
262 262
263static void open_counters(struct perf_evlist *evlist) 263static void open_counters(struct perf_evlist *evlist)
264{ 264{
265 struct perf_evsel *pos; 265 struct perf_evsel *pos, *first;
266 266
267 if (evlist->cpus->map[0] < 0) 267 if (evlist->cpus->map[0] < 0)
268 no_inherit = true; 268 no_inherit = true;
269 269
270 first = list_entry(evlist->entries.next, struct perf_evsel, node);
271
270 list_for_each_entry(pos, &evlist->entries, node) { 272 list_for_each_entry(pos, &evlist->entries, node) {
271 struct perf_event_attr *attr = &pos->attr; 273 struct perf_event_attr *attr = &pos->attr;
274 struct xyarray *group_fd = NULL;
272 /* 275 /*
273 * Check if parse_single_tracepoint_event has already asked for 276 * Check if parse_single_tracepoint_event has already asked for
274 * PERF_SAMPLE_TIME. 277 * PERF_SAMPLE_TIME.
@@ -283,15 +286,19 @@ static void open_counters(struct perf_evlist *evlist)
283 */ 286 */
284 bool time_needed = attr->sample_type & PERF_SAMPLE_TIME; 287 bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
285 288
289 if (group && pos != first)
290 group_fd = first->fd;
291
286 config_attr(pos, evlist); 292 config_attr(pos, evlist);
287retry_sample_id: 293retry_sample_id:
288 attr->sample_id_all = sample_id_all_avail ? 1 : 0; 294 attr->sample_id_all = sample_id_all_avail ? 1 : 0;
289try_again: 295try_again:
290 if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group) < 0) { 296 if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
297 group_fd) < 0) {
291 int err = errno; 298 int err = errno;
292 299
293 if (err == EPERM || err == EACCES) { 300 if (err == EPERM || err == EACCES) {
294 ui__warning_paranoid(); 301 ui__error_paranoid();
295 exit(EXIT_FAILURE); 302 exit(EXIT_FAILURE);
296 } else if (err == ENODEV && cpu_list) { 303 } else if (err == ENODEV && cpu_list) {
297 die("No such device - did you specify" 304 die("No such device - did you specify"
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 7ce65f52415e..7d98676808d8 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -278,9 +278,14 @@ struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
278struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS]; 278struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
279struct stats walltime_nsecs_stats; 279struct stats walltime_nsecs_stats;
280 280
281static int create_perf_stat_counter(struct perf_evsel *evsel) 281static int create_perf_stat_counter(struct perf_evsel *evsel,
282 struct perf_evsel *first)
282{ 283{
283 struct perf_event_attr *attr = &evsel->attr; 284 struct perf_event_attr *attr = &evsel->attr;
285 struct xyarray *group_fd = NULL;
286
287 if (group && evsel != first)
288 group_fd = first->fd;
284 289
285 if (scale) 290 if (scale)
286 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 291 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
@@ -289,14 +294,15 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
289 attr->inherit = !no_inherit; 294 attr->inherit = !no_inherit;
290 295
291 if (system_wide) 296 if (system_wide)
292 return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, group); 297 return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
293 298 group, group_fd);
294 if (target_pid == -1 && target_tid == -1) { 299 if (target_pid == -1 && target_tid == -1) {
295 attr->disabled = 1; 300 attr->disabled = 1;
296 attr->enable_on_exec = 1; 301 attr->enable_on_exec = 1;
297 } 302 }
298 303
299 return perf_evsel__open_per_thread(evsel, evsel_list->threads, group); 304 return perf_evsel__open_per_thread(evsel, evsel_list->threads,
305 group, group_fd);
300} 306}
301 307
302/* 308/*
@@ -396,7 +402,7 @@ static int read_counter(struct perf_evsel *counter)
396static int run_perf_stat(int argc __used, const char **argv) 402static int run_perf_stat(int argc __used, const char **argv)
397{ 403{
398 unsigned long long t0, t1; 404 unsigned long long t0, t1;
399 struct perf_evsel *counter; 405 struct perf_evsel *counter, *first;
400 int status = 0; 406 int status = 0;
401 int child_ready_pipe[2], go_pipe[2]; 407 int child_ready_pipe[2], go_pipe[2];
402 const bool forks = (argc > 0); 408 const bool forks = (argc > 0);
@@ -453,8 +459,10 @@ static int run_perf_stat(int argc __used, const char **argv)
453 close(child_ready_pipe[0]); 459 close(child_ready_pipe[0]);
454 } 460 }
455 461
462 first = list_entry(evsel_list->entries.next, struct perf_evsel, node);
463
456 list_for_each_entry(counter, &evsel_list->entries, node) { 464 list_for_each_entry(counter, &evsel_list->entries, node) {
457 if (create_perf_stat_counter(counter) < 0) { 465 if (create_perf_stat_counter(counter, first) < 0) {
458 if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) { 466 if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) {
459 if (verbose) 467 if (verbose)
460 ui__warning("%s event is not supported by the kernel.\n", 468 ui__warning("%s event is not supported by the kernel.\n",
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index efe696f936e2..831d1baeac37 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -291,7 +291,7 @@ static int test__open_syscall_event(void)
291 goto out_thread_map_delete; 291 goto out_thread_map_delete;
292 } 292 }
293 293
294 if (perf_evsel__open_per_thread(evsel, threads, false) < 0) { 294 if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) {
295 pr_debug("failed to open counter: %s, " 295 pr_debug("failed to open counter: %s, "
296 "tweak /proc/sys/kernel/perf_event_paranoid?\n", 296 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
297 strerror(errno)); 297 strerror(errno));
@@ -366,7 +366,7 @@ static int test__open_syscall_event_on_all_cpus(void)
366 goto out_thread_map_delete; 366 goto out_thread_map_delete;
367 } 367 }
368 368
369 if (perf_evsel__open(evsel, cpus, threads, false) < 0) { 369 if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) {
370 pr_debug("failed to open counter: %s, " 370 pr_debug("failed to open counter: %s, "
371 "tweak /proc/sys/kernel/perf_event_paranoid?\n", 371 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
372 strerror(errno)); 372 strerror(errno));
@@ -531,7 +531,7 @@ static int test__basic_mmap(void)
531 531
532 perf_evlist__add(evlist, evsels[i]); 532 perf_evlist__add(evlist, evsels[i]);
533 533
534 if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) { 534 if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) {
535 pr_debug("failed to open counter: %s, " 535 pr_debug("failed to open counter: %s, "
536 "tweak /proc/sys/kernel/perf_event_paranoid?\n", 536 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
537 strerror(errno)); 537 strerror(errno));
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 7a871714d44e..c9cdedb58134 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -89,6 +89,7 @@ static bool vmlinux_warned;
89static bool inherit = false; 89static bool inherit = false;
90static int realtime_prio = 0; 90static int realtime_prio = 0;
91static bool group = false; 91static bool group = false;
92static bool sample_id_all_avail = true;
92static unsigned int mmap_pages = 128; 93static unsigned int mmap_pages = 128;
93 94
94static bool dump_symtab = false; 95static bool dump_symtab = false;
@@ -199,7 +200,8 @@ static void record_precise_ip(struct hist_entry *he, int counter, u64 ip)
199 struct symbol *sym; 200 struct symbol *sym;
200 201
201 if (he == NULL || he->ms.sym == NULL || 202 if (he == NULL || he->ms.sym == NULL ||
202 (he != top.sym_filter_entry && use_browser != 1)) 203 ((top.sym_filter_entry == NULL ||
204 top.sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
203 return; 205 return;
204 206
205 sym = he->ms.sym; 207 sym = he->ms.sym;
@@ -289,11 +291,13 @@ static void print_sym_table(void)
289 291
290 printf("%-*.*s\n", win_width, win_width, graph_dotted_line); 292 printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
291 293
292 if (top.total_lost_warned != top.session->hists.stats.total_lost) { 294 if (top.sym_evsel->hists.stats.nr_lost_warned !=
293 top.total_lost_warned = top.session->hists.stats.total_lost; 295 top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) {
294 color_fprintf(stdout, PERF_COLOR_RED, "WARNING:"); 296 top.sym_evsel->hists.stats.nr_lost_warned =
295 printf(" LOST %" PRIu64 " events, Check IO/CPU overload\n", 297 top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST];
296 top.total_lost_warned); 298 color_fprintf(stdout, PERF_COLOR_RED,
299 "WARNING: LOST %d chunks, Check IO/CPU overload",
300 top.sym_evsel->hists.stats.nr_lost_warned);
297 ++printed; 301 ++printed;
298 } 302 }
299 303
@@ -561,7 +565,6 @@ static void perf_top__sort_new_samples(void *arg)
561 hists__decay_entries_threaded(&t->sym_evsel->hists, 565 hists__decay_entries_threaded(&t->sym_evsel->hists,
562 top.hide_user_symbols, 566 top.hide_user_symbols,
563 top.hide_kernel_symbols); 567 top.hide_kernel_symbols);
564 hists__output_recalc_col_len(&t->sym_evsel->hists, winsize.ws_row - 3);
565} 568}
566 569
567static void *display_thread_tui(void *arg __used) 570static void *display_thread_tui(void *arg __used)
@@ -671,6 +674,7 @@ static int symbol_filter(struct map *map __used, struct symbol *sym)
671} 674}
672 675
673static void perf_event__process_sample(const union perf_event *event, 676static void perf_event__process_sample(const union perf_event *event,
677 struct perf_evsel *evsel,
674 struct perf_sample *sample, 678 struct perf_sample *sample,
675 struct perf_session *session) 679 struct perf_session *session)
676{ 680{
@@ -770,12 +774,8 @@ static void perf_event__process_sample(const union perf_event *event,
770 } 774 }
771 775
772 if (al.sym == NULL || !al.sym->ignore) { 776 if (al.sym == NULL || !al.sym->ignore) {
773 struct perf_evsel *evsel;
774 struct hist_entry *he; 777 struct hist_entry *he;
775 778
776 evsel = perf_evlist__id2evsel(top.evlist, sample->id);
777 assert(evsel != NULL);
778
779 if ((sort__has_parent || symbol_conf.use_callchain) && 779 if ((sort__has_parent || symbol_conf.use_callchain) &&
780 sample->callchain) { 780 sample->callchain) {
781 err = perf_session__resolve_callchain(session, al.thread, 781 err = perf_session__resolve_callchain(session, al.thread,
@@ -807,6 +807,7 @@ static void perf_event__process_sample(const union perf_event *event,
807static void perf_session__mmap_read_idx(struct perf_session *self, int idx) 807static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
808{ 808{
809 struct perf_sample sample; 809 struct perf_sample sample;
810 struct perf_evsel *evsel;
810 union perf_event *event; 811 union perf_event *event;
811 int ret; 812 int ret;
812 813
@@ -817,10 +818,16 @@ static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
817 continue; 818 continue;
818 } 819 }
819 820
821 evsel = perf_evlist__id2evsel(self->evlist, sample.id);
822 assert(evsel != NULL);
823
820 if (event->header.type == PERF_RECORD_SAMPLE) 824 if (event->header.type == PERF_RECORD_SAMPLE)
821 perf_event__process_sample(event, &sample, self); 825 perf_event__process_sample(event, evsel, &sample, self);
822 else 826 else if (event->header.type < PERF_RECORD_MAX) {
827 hists__inc_nr_events(&evsel->hists, event->header.type);
823 perf_event__process(event, &sample, self); 828 perf_event__process(event, &sample, self);
829 } else
830 ++self->hists.stats.nr_unknown_events;
824 } 831 }
825} 832}
826 833
@@ -834,10 +841,16 @@ static void perf_session__mmap_read(struct perf_session *self)
834 841
835static void start_counters(struct perf_evlist *evlist) 842static void start_counters(struct perf_evlist *evlist)
836{ 843{
837 struct perf_evsel *counter; 844 struct perf_evsel *counter, *first;
845
846 first = list_entry(evlist->entries.next, struct perf_evsel, node);
838 847
839 list_for_each_entry(counter, &evlist->entries, node) { 848 list_for_each_entry(counter, &evlist->entries, node) {
840 struct perf_event_attr *attr = &counter->attr; 849 struct perf_event_attr *attr = &counter->attr;
850 struct xyarray *group_fd = NULL;
851
852 if (group && counter != first)
853 group_fd = first->fd;
841 854
842 attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; 855 attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
843 856
@@ -858,14 +871,23 @@ static void start_counters(struct perf_evlist *evlist)
858 attr->mmap = 1; 871 attr->mmap = 1;
859 attr->comm = 1; 872 attr->comm = 1;
860 attr->inherit = inherit; 873 attr->inherit = inherit;
874retry_sample_id:
875 attr->sample_id_all = sample_id_all_avail ? 1 : 0;
861try_again: 876try_again:
862 if (perf_evsel__open(counter, top.evlist->cpus, 877 if (perf_evsel__open(counter, top.evlist->cpus,
863 top.evlist->threads, group) < 0) { 878 top.evlist->threads, group,
879 group_fd) < 0) {
864 int err = errno; 880 int err = errno;
865 881
866 if (err == EPERM || err == EACCES) { 882 if (err == EPERM || err == EACCES) {
867 ui__warning_paranoid(); 883 ui__error_paranoid();
868 goto out_err; 884 goto out_err;
885 } else if (err == EINVAL && sample_id_all_avail) {
886 /*
887 * Old kernel, no attr->sample_id_type_all field
888 */
889 sample_id_all_avail = false;
890 goto retry_sample_id;
869 } 891 }
870 /* 892 /*
871 * If it's cycles then fall back to hrtimer 893 * If it's cycles then fall back to hrtimer
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index bc8f4773d4d8..119e996035c8 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -310,9 +310,12 @@ fallback:
310 } 310 }
311 err = -ENOENT; 311 err = -ENOENT;
312 dso->annotate_warned = 1; 312 dso->annotate_warned = 1;
313 pr_err("Can't annotate %s: No vmlinux file%s was found in the " 313 pr_err("Can't annotate %s:\n\n"
314 "path.\nPlease use 'perf buildid-cache -av vmlinux' or " 314 "No vmlinux file%s\nwas found in the path.\n\n"
315 "--vmlinux vmlinux.\n", 315 "Please use:\n\n"
316 " perf buildid-cache -av vmlinux\n\n"
317 "or:\n\n"
318 " --vmlinux vmlinux",
316 sym->name, build_id_msg ?: ""); 319 sym->name, build_id_msg ?: "");
317 goto out_free_filename; 320 goto out_free_filename;
318 } 321 }
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 155749d74350..26817daa2961 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -47,19 +47,20 @@ int dump_printf(const char *fmt, ...)
47} 47}
48 48
49#ifdef NO_NEWT_SUPPORT 49#ifdef NO_NEWT_SUPPORT
50void ui__warning(const char *format, ...) 50int ui__warning(const char *format, ...)
51{ 51{
52 va_list args; 52 va_list args;
53 53
54 va_start(args, format); 54 va_start(args, format);
55 vfprintf(stderr, format, args); 55 vfprintf(stderr, format, args);
56 va_end(args); 56 va_end(args);
57 return 0;
57} 58}
58#endif 59#endif
59 60
60void ui__warning_paranoid(void) 61int ui__error_paranoid(void)
61{ 62{
62 ui__warning("Permission error - are you root?\n" 63 return ui__error("Permission error - are you root?\n"
63 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" 64 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
64 " -1 - Not paranoid at all\n" 65 " -1 - Not paranoid at all\n"
65 " 0 - Disallow raw tracepoint access for unpriv\n" 66 " 0 - Disallow raw tracepoint access for unpriv\n"
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index fd53db47e3de..f2ce88d04f54 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -19,23 +19,18 @@ static inline int ui_helpline__show_help(const char *format __used, va_list ap _
19 return 0; 19 return 0;
20} 20}
21 21
22static inline struct ui_progress *ui_progress__new(const char *title __used, 22static inline void ui_progress__update(u64 curr __used, u64 total __used,
23 u64 total __used) 23 const char *title __used) {}
24{
25 return (struct ui_progress *)1;
26}
27
28static inline void ui_progress__update(struct ui_progress *self __used,
29 u64 curr __used) {}
30 24
31static inline void ui_progress__delete(struct ui_progress *self __used) {} 25#define ui__error(format, arg...) ui__warning(format, ##arg)
32#else 26#else
33extern char ui_helpline__last_msg[]; 27extern char ui_helpline__last_msg[];
34int ui_helpline__show_help(const char *format, va_list ap); 28int ui_helpline__show_help(const char *format, va_list ap);
35#include "ui/progress.h" 29#include "ui/progress.h"
30int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
36#endif 31#endif
37 32
38void ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2))); 33int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
39void ui__warning_paranoid(void); 34int ui__error_paranoid(void);
40 35
41#endif /* __PERF_DEBUG_H */ 36#endif /* __PERF_DEBUG_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 2f6bc89027da..fbb4b4ab9cc6 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -539,3 +539,33 @@ void perf_evlist__set_selected(struct perf_evlist *evlist,
539{ 539{
540 evlist->selected = evsel; 540 evlist->selected = evsel;
541} 541}
542
543int perf_evlist__open(struct perf_evlist *evlist, bool group)
544{
545 struct perf_evsel *evsel, *first;
546 int err, ncpus, nthreads;
547
548 first = list_entry(evlist->entries.next, struct perf_evsel, node);
549
550 list_for_each_entry(evsel, &evlist->entries, node) {
551 struct xyarray *group_fd = NULL;
552
553 if (group && evsel != first)
554 group_fd = first->fd;
555
556 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
557 group, group_fd);
558 if (err < 0)
559 goto out_err;
560 }
561
562 return 0;
563out_err:
564 ncpus = evlist->cpus ? evlist->cpus->nr : 1;
565 nthreads = evlist->threads ? evlist->threads->nr : 1;
566
567 list_for_each_entry_reverse(evsel, &evlist->entries, node)
568 perf_evsel__close(evsel, ncpus, nthreads);
569
570 return err;
571}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 6be71fc57794..1779ffef7828 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -50,6 +50,8 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
50 50
51union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); 51union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
52 52
53int perf_evlist__open(struct perf_evlist *evlist, bool group);
54
53int perf_evlist__alloc_mmap(struct perf_evlist *evlist); 55int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
54int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); 56int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
55void perf_evlist__munmap(struct perf_evlist *evlist); 57void perf_evlist__munmap(struct perf_evlist *evlist);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index b46f6e4bff3c..e42626422587 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -16,6 +16,7 @@
16#include "thread_map.h" 16#include "thread_map.h"
17 17
18#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 18#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
19#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
19 20
20int __perf_evsel__sample_size(u64 sample_type) 21int __perf_evsel__sample_size(u64 sample_type)
21{ 22{
@@ -204,15 +205,16 @@ int __perf_evsel__read(struct perf_evsel *evsel,
204} 205}
205 206
206static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, 207static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
207 struct thread_map *threads, bool group) 208 struct thread_map *threads, bool group,
209 struct xyarray *group_fds)
208{ 210{
209 int cpu, thread; 211 int cpu, thread;
210 unsigned long flags = 0; 212 unsigned long flags = 0;
211 int pid = -1; 213 int pid = -1, err;
212 214
213 if (evsel->fd == NULL && 215 if (evsel->fd == NULL &&
214 perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0) 216 perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
215 return -1; 217 return -ENOMEM;
216 218
217 if (evsel->cgrp) { 219 if (evsel->cgrp) {
218 flags = PERF_FLAG_PID_CGROUP; 220 flags = PERF_FLAG_PID_CGROUP;
@@ -220,7 +222,7 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
220 } 222 }
221 223
222 for (cpu = 0; cpu < cpus->nr; cpu++) { 224 for (cpu = 0; cpu < cpus->nr; cpu++) {
223 int group_fd = -1; 225 int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;
224 226
225 for (thread = 0; thread < threads->nr; thread++) { 227 for (thread = 0; thread < threads->nr; thread++) {
226 228
@@ -231,8 +233,10 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
231 pid, 233 pid,
232 cpus->map[cpu], 234 cpus->map[cpu],
233 group_fd, flags); 235 group_fd, flags);
234 if (FD(evsel, cpu, thread) < 0) 236 if (FD(evsel, cpu, thread) < 0) {
237 err = -errno;
235 goto out_close; 238 goto out_close;
239 }
236 240
237 if (group && group_fd == -1) 241 if (group && group_fd == -1)
238 group_fd = FD(evsel, cpu, thread); 242 group_fd = FD(evsel, cpu, thread);
@@ -249,7 +253,17 @@ out_close:
249 } 253 }
250 thread = threads->nr; 254 thread = threads->nr;
251 } while (--cpu >= 0); 255 } while (--cpu >= 0);
252 return -1; 256 return err;
257}
258
259void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
260{
261 if (evsel->fd == NULL)
262 return;
263
264 perf_evsel__close_fd(evsel, ncpus, nthreads);
265 perf_evsel__free_fd(evsel);
266 evsel->fd = NULL;
253} 267}
254 268
255static struct { 269static struct {
@@ -269,7 +283,8 @@ static struct {
269}; 283};
270 284
271int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, 285int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
272 struct thread_map *threads, bool group) 286 struct thread_map *threads, bool group,
287 struct xyarray *group_fd)
273{ 288{
274 if (cpus == NULL) { 289 if (cpus == NULL) {
275 /* Work around old compiler warnings about strict aliasing */ 290 /* Work around old compiler warnings about strict aliasing */
@@ -279,19 +294,23 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
279 if (threads == NULL) 294 if (threads == NULL)
280 threads = &empty_thread_map.map; 295 threads = &empty_thread_map.map;
281 296
282 return __perf_evsel__open(evsel, cpus, threads, group); 297 return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
283} 298}
284 299
285int perf_evsel__open_per_cpu(struct perf_evsel *evsel, 300int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
286 struct cpu_map *cpus, bool group) 301 struct cpu_map *cpus, bool group,
302 struct xyarray *group_fd)
287{ 303{
288 return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group); 304 return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
305 group_fd);
289} 306}
290 307
291int perf_evsel__open_per_thread(struct perf_evsel *evsel, 308int perf_evsel__open_per_thread(struct perf_evsel *evsel,
292 struct thread_map *threads, bool group) 309 struct thread_map *threads, bool group,
310 struct xyarray *group_fd)
293{ 311{
294 return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group); 312 return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
313 group_fd);
295} 314}
296 315
297static int perf_event__parse_id_sample(const union perf_event *event, u64 type, 316static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index e9a31554e265..b1d15e6f7ae3 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -82,11 +82,15 @@ void perf_evsel__free_id(struct perf_evsel *evsel);
82void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); 82void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
83 83
84int perf_evsel__open_per_cpu(struct perf_evsel *evsel, 84int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
85 struct cpu_map *cpus, bool group); 85 struct cpu_map *cpus, bool group,
86 struct xyarray *group_fds);
86int perf_evsel__open_per_thread(struct perf_evsel *evsel, 87int perf_evsel__open_per_thread(struct perf_evsel *evsel,
87 struct thread_map *threads, bool group); 88 struct thread_map *threads, bool group,
89 struct xyarray *group_fds);
88int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, 90int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
89 struct thread_map *threads, bool group); 91 struct thread_map *threads, bool group,
92 struct xyarray *group_fds);
93void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
90 94
91#define perf_evsel__match(evsel, t, c) \ 95#define perf_evsel__match(evsel, t, c) \
92 (evsel->attr.type == PERF_TYPE_##t && \ 96 (evsel->attr.type == PERF_TYPE_##t && \
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 76c0b2c49eb8..bcd05d05b4f0 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1,5 +1,6 @@
1#define _FILE_OFFSET_BITS 64 1#define _FILE_OFFSET_BITS 64
2 2
3#include "util.h"
3#include <sys/types.h> 4#include <sys/types.h>
4#include <byteswap.h> 5#include <byteswap.h>
5#include <unistd.h> 6#include <unistd.h>
@@ -11,7 +12,6 @@
11 12
12#include "evlist.h" 13#include "evlist.h"
13#include "evsel.h" 14#include "evsel.h"
14#include "util.h"
15#include "header.h" 15#include "header.h"
16#include "../perf.h" 16#include "../perf.h"
17#include "trace-event.h" 17#include "trace-event.h"
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index f6a993963a1e..a36a3fa81ffb 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -365,7 +365,6 @@ static void __hists__collapse_resort(struct hists *hists, bool threaded)
365 365
366 root = hists__get_rotate_entries_in(hists); 366 root = hists__get_rotate_entries_in(hists);
367 next = rb_first(root); 367 next = rb_first(root);
368 hists->stats.total_period = 0;
369 368
370 while (next) { 369 while (next) {
371 n = rb_entry(next, struct hist_entry, rb_node_in); 370 n = rb_entry(next, struct hist_entry, rb_node_in);
@@ -379,7 +378,6 @@ static void __hists__collapse_resort(struct hists *hists, bool threaded)
379 * been set by, say, the hist_browser. 378 * been set by, say, the hist_browser.
380 */ 379 */
381 hists__apply_filters(hists, n); 380 hists__apply_filters(hists, n);
382 hists__inc_nr_entries(hists, n);
383 } 381 }
384 } 382 }
385} 383}
@@ -442,6 +440,7 @@ static void __hists__output_resort(struct hists *hists, bool threaded)
442 hists->entries = RB_ROOT; 440 hists->entries = RB_ROOT;
443 441
444 hists->nr_entries = 0; 442 hists->nr_entries = 0;
443 hists->stats.total_period = 0;
445 hists__reset_col_len(hists); 444 hists__reset_col_len(hists);
446 445
447 while (next) { 446 while (next) {
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index ff93ddc91c5c..c86c1d27bd1e 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -28,6 +28,7 @@ struct events_stats {
28 u64 total_lost; 28 u64 total_lost;
29 u64 total_invalid_chains; 29 u64 total_invalid_chains;
30 u32 nr_events[PERF_RECORD_HEADER_MAX]; 30 u32 nr_events[PERF_RECORD_HEADER_MAX];
31 u32 nr_lost_warned;
31 u32 nr_unknown_events; 32 u32 nr_unknown_events;
32 u32 nr_invalid_chains; 33 u32 nr_invalid_chains;
33 u32 nr_unknown_id; 34 u32 nr_unknown_id;
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 7624324efad4..9dd47a4f2596 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -623,7 +623,11 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
623 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; 623 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
624 624
625 evsel->attr.inherit = inherit; 625 evsel->attr.inherit = inherit;
626 if (perf_evsel__open(evsel, cpus, threads, group) < 0) { 626 /*
627 * This will group just the fds for this single evsel, to group
628 * multiple events, use evlist.open().
629 */
630 if (perf_evsel__open(evsel, cpus, threads, group, NULL) < 0) {
627 PyErr_SetFromErrno(PyExc_OSError); 631 PyErr_SetFromErrno(PyExc_OSError);
628 return NULL; 632 return NULL;
629 } 633 }
@@ -814,6 +818,25 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
814 return Py_None; 818 return Py_None;
815} 819}
816 820
821static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
822 PyObject *args, PyObject *kwargs)
823{
824 struct perf_evlist *evlist = &pevlist->evlist;
825 int group = 0;
826 static char *kwlist[] = { "group", NULL };
827
828 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, &group))
829 return NULL;
830
831 if (perf_evlist__open(evlist, group) < 0) {
832 PyErr_SetFromErrno(PyExc_OSError);
833 return NULL;
834 }
835
836 Py_INCREF(Py_None);
837 return Py_None;
838}
839
817static PyMethodDef pyrf_evlist__methods[] = { 840static PyMethodDef pyrf_evlist__methods[] = {
818 { 841 {
819 .ml_name = "mmap", 842 .ml_name = "mmap",
@@ -822,6 +845,12 @@ static PyMethodDef pyrf_evlist__methods[] = {
822 .ml_doc = PyDoc_STR("mmap the file descriptor table.") 845 .ml_doc = PyDoc_STR("mmap the file descriptor table.")
823 }, 846 },
824 { 847 {
848 .ml_name = "open",
849 .ml_meth = (PyCFunction)pyrf_evlist__open,
850 .ml_flags = METH_VARARGS | METH_KEYWORDS,
851 .ml_doc = PyDoc_STR("open the file descriptors.")
852 },
853 {
825 .ml_name = "poll", 854 .ml_name = "poll",
826 .ml_meth = (PyCFunction)pyrf_evlist__poll, 855 .ml_meth = (PyCFunction)pyrf_evlist__poll,
827 .ml_flags = METH_VARARGS | METH_KEYWORDS, 856 .ml_flags = METH_VARARGS | METH_KEYWORDS,
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 20e011c99a94..85c1e6b76f0a 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -502,6 +502,7 @@ static void flush_sample_queue(struct perf_session *s,
502 struct perf_sample sample; 502 struct perf_sample sample;
503 u64 limit = os->next_flush; 503 u64 limit = os->next_flush;
504 u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL; 504 u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
505 unsigned idx = 0, progress_next = os->nr_samples / 16;
505 int ret; 506 int ret;
506 507
507 if (!ops->ordered_samples || !limit) 508 if (!ops->ordered_samples || !limit)
@@ -521,6 +522,11 @@ static void flush_sample_queue(struct perf_session *s,
521 os->last_flush = iter->timestamp; 522 os->last_flush = iter->timestamp;
522 list_del(&iter->list); 523 list_del(&iter->list);
523 list_add(&iter->list, &os->sample_cache); 524 list_add(&iter->list, &os->sample_cache);
525 if (++idx >= progress_next) {
526 progress_next += os->nr_samples / 16;
527 ui_progress__update(idx, os->nr_samples,
528 "Processing time ordered events...");
529 }
524 } 530 }
525 531
526 if (list_empty(head)) { 532 if (list_empty(head)) {
@@ -529,6 +535,8 @@ static void flush_sample_queue(struct perf_session *s,
529 os->last_sample = 535 os->last_sample =
530 list_entry(head->prev, struct sample_queue, list); 536 list_entry(head->prev, struct sample_queue, list);
531 } 537 }
538
539 os->nr_samples = 0;
532} 540}
533 541
534/* 542/*
@@ -588,6 +596,7 @@ static void __queue_event(struct sample_queue *new, struct perf_session *s)
588 u64 timestamp = new->timestamp; 596 u64 timestamp = new->timestamp;
589 struct list_head *p; 597 struct list_head *p;
590 598
599 ++os->nr_samples;
591 os->last_sample = new; 600 os->last_sample = new;
592 601
593 if (!sample) { 602 if (!sample) {
@@ -738,10 +747,27 @@ static int perf_session_deliver_event(struct perf_session *session,
738 747
739 dump_event(session, event, file_offset, sample); 748 dump_event(session, event, file_offset, sample);
740 749
750 evsel = perf_evlist__id2evsel(session->evlist, sample->id);
751 if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
752 /*
753 * XXX We're leaving PERF_RECORD_SAMPLE unnacounted here
754 * because the tools right now may apply filters, discarding
755 * some of the samples. For consistency, in the future we
756 * should have something like nr_filtered_samples and remove
757 * the sample->period from total_sample_period, etc, KISS for
758 * now tho.
759 *
760 * Also testing against NULL allows us to handle files without
761 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
762 * future probably it'll be a good idea to restrict event
763 * processing via perf_session to files with both set.
764 */
765 hists__inc_nr_events(&evsel->hists, event->header.type);
766 }
767
741 switch (event->header.type) { 768 switch (event->header.type) {
742 case PERF_RECORD_SAMPLE: 769 case PERF_RECORD_SAMPLE:
743 dump_sample(session, event, sample); 770 dump_sample(session, event, sample);
744 evsel = perf_evlist__id2evsel(session->evlist, sample->id);
745 if (evsel == NULL) { 771 if (evsel == NULL) {
746 ++session->hists.stats.nr_unknown_id; 772 ++session->hists.stats.nr_unknown_id;
747 return -1; 773 return -1;
@@ -874,11 +900,11 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
874 const struct perf_event_ops *ops) 900 const struct perf_event_ops *ops)
875{ 901{
876 if (ops->lost == perf_event__process_lost && 902 if (ops->lost == perf_event__process_lost &&
877 session->hists.stats.total_lost != 0) { 903 session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
878 ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64 904 ui__warning("Processed %d events and lost %d chunks!\n\n"
879 "!\n\nCheck IO/CPU overload!\n\n", 905 "Check IO/CPU overload!\n\n",
880 session->hists.stats.total_period, 906 session->hists.stats.nr_events[0],
881 session->hists.stats.total_lost); 907 session->hists.stats.nr_events[PERF_RECORD_LOST]);
882 } 908 }
883 909
884 if (session->hists.stats.nr_unknown_events != 0) { 910 if (session->hists.stats.nr_unknown_events != 0) {
@@ -1012,7 +1038,6 @@ int __perf_session__process_events(struct perf_session *session,
1012{ 1038{
1013 u64 head, page_offset, file_offset, file_pos, progress_next; 1039 u64 head, page_offset, file_offset, file_pos, progress_next;
1014 int err, mmap_prot, mmap_flags, map_idx = 0; 1040 int err, mmap_prot, mmap_flags, map_idx = 0;
1015 struct ui_progress *progress;
1016 size_t page_size, mmap_size; 1041 size_t page_size, mmap_size;
1017 char *buf, *mmaps[8]; 1042 char *buf, *mmaps[8];
1018 union perf_event *event; 1043 union perf_event *event;
@@ -1030,9 +1055,6 @@ int __perf_session__process_events(struct perf_session *session,
1030 file_size = data_offset + data_size; 1055 file_size = data_offset + data_size;
1031 1056
1032 progress_next = file_size / 16; 1057 progress_next = file_size / 16;
1033 progress = ui_progress__new("Processing events...", file_size);
1034 if (progress == NULL)
1035 return -1;
1036 1058
1037 mmap_size = session->mmap_window; 1059 mmap_size = session->mmap_window;
1038 if (mmap_size > file_size) 1060 if (mmap_size > file_size)
@@ -1095,7 +1117,8 @@ more:
1095 1117
1096 if (file_pos >= progress_next) { 1118 if (file_pos >= progress_next) {
1097 progress_next += file_size / 16; 1119 progress_next += file_size / 16;
1098 ui_progress__update(progress, file_pos); 1120 ui_progress__update(file_pos, file_size,
1121 "Processing events...");
1099 } 1122 }
1100 1123
1101 if (file_pos < file_size) 1124 if (file_pos < file_size)
@@ -1106,7 +1129,6 @@ more:
1106 session->ordered_samples.next_flush = ULLONG_MAX; 1129 session->ordered_samples.next_flush = ULLONG_MAX;
1107 flush_sample_queue(session, ops); 1130 flush_sample_queue(session, ops);
1108out_err: 1131out_err:
1109 ui_progress__delete(progress);
1110 perf_session__warn_about_errors(session, ops); 1132 perf_session__warn_about_errors(session, ops);
1111 perf_session_free_sample_buffers(session); 1133 perf_session_free_sample_buffers(session);
1112 return err; 1134 return err;
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 514b06d41f05..6e393c98eb34 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -23,6 +23,7 @@ struct ordered_samples {
23 struct sample_queue *sample_buffer; 23 struct sample_queue *sample_buffer;
24 struct sample_queue *last_sample; 24 struct sample_queue *last_sample;
25 int sample_buffer_idx; 25 int sample_buffer_idx;
26 unsigned int nr_samples;
26}; 27};
27 28
28struct perf_session { 29struct perf_session {
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 01d1057f3074..399650967958 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -19,7 +19,6 @@ struct perf_top {
19 u64 kernel_samples, us_samples; 19 u64 kernel_samples, us_samples;
20 u64 exact_samples; 20 u64 exact_samples;
21 u64 guest_us_samples, guest_kernel_samples; 21 u64 guest_us_samples, guest_kernel_samples;
22 u64 total_lost_warned;
23 int print_entries, count_filter, delay_secs; 22 int print_entries, count_filter, delay_secs;
24 int freq; 23 int freq;
25 pid_t target_pid, target_tid; 24 pid_t target_pid, target_tid;
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 2d530cf74f43..d2655f08bcc0 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -80,7 +80,7 @@ static void die(const char *fmt, ...)
80 int ret = errno; 80 int ret = errno;
81 81
82 if (errno) 82 if (errno)
83 perror("trace-cmd"); 83 perror("perf");
84 else 84 else
85 ret = -1; 85 ret = -1;
86 86
diff --git a/tools/perf/util/ui/browser.c b/tools/perf/util/ui/browser.c
index 5359f371d30a..556829124b02 100644
--- a/tools/perf/util/ui/browser.c
+++ b/tools/perf/util/ui/browser.c
@@ -4,6 +4,7 @@
4#include "libslang.h" 4#include "libslang.h"
5#include <newt.h> 5#include <newt.h>
6#include "ui.h" 6#include "ui.h"
7#include "util.h"
7#include <linux/compiler.h> 8#include <linux/compiler.h>
8#include <linux/list.h> 9#include <linux/list.h>
9#include <linux/rbtree.h> 10#include <linux/rbtree.h>
@@ -168,6 +169,59 @@ void ui_browser__refresh_dimensions(struct ui_browser *self)
168 self->x = 0; 169 self->x = 0;
169} 170}
170 171
172void ui_browser__handle_resize(struct ui_browser *browser)
173{
174 ui__refresh_dimensions(false);
175 ui_browser__show(browser, browser->title, ui_helpline__current);
176 ui_browser__refresh(browser);
177}
178
179int ui_browser__warning(struct ui_browser *browser, int timeout,
180 const char *format, ...)
181{
182 va_list args;
183 char *text;
184 int key = 0, err;
185
186 va_start(args, format);
187 err = vasprintf(&text, format, args);
188 va_end(args);
189
190 if (err < 0) {
191 va_start(args, format);
192 ui_helpline__vpush(format, args);
193 va_end(args);
194 } else {
195 while ((key == ui__question_window("Warning!", text,
196 "Press any key...",
197 timeout)) == K_RESIZE)
198 ui_browser__handle_resize(browser);
199 free(text);
200 }
201
202 return key;
203}
204
205int ui_browser__help_window(struct ui_browser *browser, const char *text)
206{
207 int key;
208
209 while ((key = ui__help_window(text)) == K_RESIZE)
210 ui_browser__handle_resize(browser);
211
212 return key;
213}
214
215bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text)
216{
217 int key;
218
219 while ((key = ui__dialog_yesno(text)) == K_RESIZE)
220 ui_browser__handle_resize(browser);
221
222 return key == K_ENTER || toupper(key) == 'Y';
223}
224
171void ui_browser__reset_index(struct ui_browser *self) 225void ui_browser__reset_index(struct ui_browser *self)
172{ 226{
173 self->index = self->top_idx = 0; 227 self->index = self->top_idx = 0;
@@ -230,13 +284,15 @@ static void ui_browser__scrollbar_set(struct ui_browser *browser)
230 (browser->nr_entries - 1)); 284 (browser->nr_entries - 1));
231 } 285 }
232 286
287 SLsmg_set_char_set(1);
288
233 while (h < height) { 289 while (h < height) {
234 ui_browser__gotorc(browser, row++, col); 290 ui_browser__gotorc(browser, row++, col);
235 SLsmg_set_char_set(1); 291 SLsmg_write_char(h == pct ? SLSMG_DIAMOND_CHAR : SLSMG_CKBRD_CHAR);
236 SLsmg_write_char(h == pct ? SLSMG_DIAMOND_CHAR : SLSMG_BOARD_CHAR);
237 SLsmg_set_char_set(0);
238 ++h; 292 ++h;
239 } 293 }
294
295 SLsmg_set_char_set(0);
240} 296}
241 297
242static int __ui_browser__refresh(struct ui_browser *browser) 298static int __ui_browser__refresh(struct ui_browser *browser)
@@ -291,53 +347,10 @@ void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries)
291 browser->seek(browser, browser->top_idx, SEEK_SET); 347 browser->seek(browser, browser->top_idx, SEEK_SET);
292} 348}
293 349
294static int ui__getch(int delay_secs)
295{
296 struct timeval timeout, *ptimeout = delay_secs ? &timeout : NULL;
297 fd_set read_set;
298 int err, key;
299
300 FD_ZERO(&read_set);
301 FD_SET(0, &read_set);
302
303 if (delay_secs) {
304 timeout.tv_sec = delay_secs;
305 timeout.tv_usec = 0;
306 }
307
308 err = select(1, &read_set, NULL, NULL, ptimeout);
309
310 if (err == 0)
311 return K_TIMER;
312
313 if (err == -1) {
314 if (errno == EINTR)
315 return K_RESIZE;
316 return K_ERROR;
317 }
318
319 key = SLang_getkey();
320 if (key != K_ESC)
321 return key;
322
323 FD_ZERO(&read_set);
324 FD_SET(0, &read_set);
325 timeout.tv_sec = 0;
326 timeout.tv_usec = 20;
327 err = select(1, &read_set, NULL, NULL, &timeout);
328 if (err == 0)
329 return K_ESC;
330
331 SLang_ungetkey(key);
332 return SLkp_getkey();
333}
334
335int ui_browser__run(struct ui_browser *self, int delay_secs) 350int ui_browser__run(struct ui_browser *self, int delay_secs)
336{ 351{
337 int err, key; 352 int err, key;
338 353
339 pthread__unblock_sigwinch();
340
341 while (1) { 354 while (1) {
342 off_t offset; 355 off_t offset;
343 356
@@ -351,10 +364,7 @@ int ui_browser__run(struct ui_browser *self, int delay_secs)
351 key = ui__getch(delay_secs); 364 key = ui__getch(delay_secs);
352 365
353 if (key == K_RESIZE) { 366 if (key == K_RESIZE) {
354 pthread_mutex_lock(&ui__lock); 367 ui__refresh_dimensions(false);
355 SLtt_get_screen_size();
356 SLsmg_reinit_smg();
357 pthread_mutex_unlock(&ui__lock);
358 ui_browser__refresh_dimensions(self); 368 ui_browser__refresh_dimensions(self);
359 __ui_browser__show_title(self, self->title); 369 __ui_browser__show_title(self, self->title);
360 ui_helpline__puts(self->helpline); 370 ui_helpline__puts(self->helpline);
@@ -533,6 +543,47 @@ static int ui_browser__color_config(const char *var, const char *value,
533 return -1; 543 return -1;
534} 544}
535 545
546void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence)
547{
548 switch (whence) {
549 case SEEK_SET:
550 browser->top = browser->entries;
551 break;
552 case SEEK_CUR:
553 browser->top = browser->top + browser->top_idx + offset;
554 break;
555 case SEEK_END:
556 browser->top = browser->top + browser->nr_entries + offset;
557 break;
558 default:
559 return;
560 }
561}
562
563unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
564{
565 unsigned int row = 0, idx = browser->top_idx;
566 char **pos;
567
568 if (browser->top == NULL)
569 browser->top = browser->entries;
570
571 pos = (char **)browser->top;
572 while (idx < browser->nr_entries) {
573 if (!browser->filter || !browser->filter(browser, *pos)) {
574 ui_browser__gotorc(browser, row, 0);
575 browser->write(browser, pos, row);
576 if (++row == browser->height)
577 break;
578 }
579
580 ++idx;
581 ++pos;
582 }
583
584 return row;
585}
586
536void ui_browser__init(void) 587void ui_browser__init(void)
537{ 588{
538 int i = 0; 589 int i = 0;
diff --git a/tools/perf/util/ui/browser.h b/tools/perf/util/ui/browser.h
index a2c707d33c5e..84d761b730c1 100644
--- a/tools/perf/util/ui/browser.h
+++ b/tools/perf/util/ui/browser.h
@@ -43,6 +43,15 @@ void ui_browser__hide(struct ui_browser *self);
43int ui_browser__refresh(struct ui_browser *self); 43int ui_browser__refresh(struct ui_browser *self);
44int ui_browser__run(struct ui_browser *browser, int delay_secs); 44int ui_browser__run(struct ui_browser *browser, int delay_secs);
45void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries); 45void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries);
46void ui_browser__handle_resize(struct ui_browser *browser);
47
48int ui_browser__warning(struct ui_browser *browser, int timeout,
49 const char *format, ...);
50int ui_browser__help_window(struct ui_browser *browser, const char *text);
51bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text);
52
53void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence);
54unsigned int ui_browser__argv_refresh(struct ui_browser *browser);
46 55
47void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence); 56void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence);
48unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self); 57unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self);
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
index 4e0cb7fea7d9..0575905d1205 100644
--- a/tools/perf/util/ui/browsers/annotate.c
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -1,6 +1,9 @@
1#include "../../util.h"
1#include "../browser.h" 2#include "../browser.h"
2#include "../helpline.h" 3#include "../helpline.h"
3#include "../libslang.h" 4#include "../libslang.h"
5#include "../ui.h"
6#include "../util.h"
4#include "../../annotate.h" 7#include "../../annotate.h"
5#include "../../hist.h" 8#include "../../hist.h"
6#include "../../sort.h" 9#include "../../sort.h"
@@ -8,15 +11,6 @@
8#include <pthread.h> 11#include <pthread.h>
9#include <newt.h> 12#include <newt.h>
10 13
11static void ui__error_window(const char *fmt, ...)
12{
13 va_list ap;
14
15 va_start(ap, fmt);
16 newtWinMessagev((char *)"Error", (char *)"Ok", (char *)fmt, ap);
17 va_end(ap);
18}
19
20struct annotate_browser { 14struct annotate_browser {
21 struct ui_browser b; 15 struct ui_browser b;
22 struct rb_root entries; 16 struct rb_root entries;
@@ -400,7 +394,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
400 return -1; 394 return -1;
401 395
402 if (symbol__annotate(sym, map, sizeof(struct objdump_line_rb_node)) < 0) { 396 if (symbol__annotate(sym, map, sizeof(struct objdump_line_rb_node)) < 0) {
403 ui__error_window(ui_helpline__last_msg); 397 ui__error("%s", ui_helpline__last_msg);
404 return -1; 398 return -1;
405 } 399 }
406 400
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index 4663dcb2a19b..d0c94b459685 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -17,6 +17,7 @@
17#include "../browser.h" 17#include "../browser.h"
18#include "../helpline.h" 18#include "../helpline.h"
19#include "../util.h" 19#include "../util.h"
20#include "../ui.h"
20#include "map.h" 21#include "map.h"
21 22
22struct hist_browser { 23struct hist_browser {
@@ -294,6 +295,15 @@ static void hist_browser__set_folding(struct hist_browser *self, bool unfold)
294 ui_browser__reset_index(&self->b); 295 ui_browser__reset_index(&self->b);
295} 296}
296 297
298static void ui_browser__warn_lost_events(struct ui_browser *browser)
299{
300 ui_browser__warning(browser, 4,
301 "Events are being lost, check IO/CPU overload!\n\n"
302 "You may want to run 'perf' using a RT scheduler policy:\n\n"
303 " perf top -r 80\n\n"
304 "Or reduce the sampling frequency.");
305}
306
297static int hist_browser__run(struct hist_browser *self, const char *ev_name, 307static int hist_browser__run(struct hist_browser *self, const char *ev_name,
298 void(*timer)(void *arg), void *arg, int delay_secs) 308 void(*timer)(void *arg), void *arg, int delay_secs)
299{ 309{
@@ -314,12 +324,18 @@ static int hist_browser__run(struct hist_browser *self, const char *ev_name,
314 key = ui_browser__run(&self->b, delay_secs); 324 key = ui_browser__run(&self->b, delay_secs);
315 325
316 switch (key) { 326 switch (key) {
317 case -1: 327 case K_TIMER:
318 /* FIXME we need to check if it was es.reason == NEWT_EXIT_TIMER */
319 timer(arg); 328 timer(arg);
320 ui_browser__update_nr_entries(&self->b, self->hists->nr_entries); 329 ui_browser__update_nr_entries(&self->b, self->hists->nr_entries);
321 hists__browser_title(self->hists, title, sizeof(title), 330
322 ev_name); 331 if (self->hists->stats.nr_lost_warned !=
332 self->hists->stats.nr_events[PERF_RECORD_LOST]) {
333 self->hists->stats.nr_lost_warned =
334 self->hists->stats.nr_events[PERF_RECORD_LOST];
335 ui_browser__warn_lost_events(&self->b);
336 }
337
338 hists__browser_title(self->hists, title, sizeof(title), ev_name);
323 ui_browser__show_title(&self->b, title); 339 ui_browser__show_title(&self->b, title);
324 continue; 340 continue;
325 case 'D': { /* Debug */ 341 case 'D': { /* Debug */
@@ -883,7 +899,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
883 goto out_free_stack; 899 goto out_free_stack;
884 case 'a': 900 case 'a':
885 if (!browser->has_symbols) { 901 if (!browser->has_symbols) {
886 ui__warning( 902 ui_browser__warning(&browser->b, delay_secs * 2,
887 "Annotation is only available for symbolic views, " 903 "Annotation is only available for symbolic views, "
888 "include \"sym\" in --sort to use it."); 904 "include \"sym\" in --sort to use it.");
889 continue; 905 continue;
@@ -901,7 +917,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
901 case K_F1: 917 case K_F1:
902 case 'h': 918 case 'h':
903 case '?': 919 case '?':
904 ui__help_window("h/?/F1 Show this window\n" 920 ui_browser__help_window(&browser->b,
921 "h/?/F1 Show this window\n"
905 "UP/DOWN/PGUP\n" 922 "UP/DOWN/PGUP\n"
906 "PGDN/SPACE Navigate\n" 923 "PGDN/SPACE Navigate\n"
907 "q/ESC/CTRL+C Exit browser\n\n" 924 "q/ESC/CTRL+C Exit browser\n\n"
@@ -914,7 +931,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
914 "C Collapse all callchains\n" 931 "C Collapse all callchains\n"
915 "E Expand all callchains\n" 932 "E Expand all callchains\n"
916 "d Zoom into current DSO\n" 933 "d Zoom into current DSO\n"
917 "t Zoom into current Thread\n"); 934 "t Zoom into current Thread");
918 continue; 935 continue;
919 case K_ENTER: 936 case K_ENTER:
920 case K_RIGHT: 937 case K_RIGHT:
@@ -940,7 +957,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
940 } 957 }
941 case K_ESC: 958 case K_ESC:
942 if (!left_exits && 959 if (!left_exits &&
943 !ui__dialog_yesno("Do you really want to exit?")) 960 !ui_browser__dialog_yesno(&browser->b,
961 "Do you really want to exit?"))
944 continue; 962 continue;
945 /* Fall thru */ 963 /* Fall thru */
946 case 'q': 964 case 'q':
@@ -993,6 +1011,7 @@ add_exit_option:
993 1011
994 if (choice == annotate) { 1012 if (choice == annotate) {
995 struct hist_entry *he; 1013 struct hist_entry *he;
1014 int err;
996do_annotate: 1015do_annotate:
997 he = hist_browser__selected_entry(browser); 1016 he = hist_browser__selected_entry(browser);
998 if (he == NULL) 1017 if (he == NULL)
@@ -1001,10 +1020,12 @@ do_annotate:
1001 * Don't let this be freed, say, by hists__decay_entry. 1020 * Don't let this be freed, say, by hists__decay_entry.
1002 */ 1021 */
1003 he->used = true; 1022 he->used = true;
1004 hist_entry__tui_annotate(he, evsel->idx, nr_events, 1023 err = hist_entry__tui_annotate(he, evsel->idx, nr_events,
1005 timer, arg, delay_secs); 1024 timer, arg, delay_secs);
1006 he->used = false; 1025 he->used = false;
1007 ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries); 1026 ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
1027 if (err)
1028 ui_browser__handle_resize(&browser->b);
1008 } else if (choice == browse_map) 1029 } else if (choice == browse_map)
1009 map__browse(browser->selection->map); 1030 map__browse(browser->selection->map);
1010 else if (choice == zoom_dso) { 1031 else if (choice == zoom_dso) {
@@ -1056,6 +1077,7 @@ out:
1056struct perf_evsel_menu { 1077struct perf_evsel_menu {
1057 struct ui_browser b; 1078 struct ui_browser b;
1058 struct perf_evsel *selection; 1079 struct perf_evsel *selection;
1080 bool lost_events, lost_events_warned;
1059}; 1081};
1060 1082
1061static void perf_evsel_menu__write(struct ui_browser *browser, 1083static void perf_evsel_menu__write(struct ui_browser *browser,
@@ -1068,14 +1090,29 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
1068 unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE]; 1090 unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE];
1069 const char *ev_name = event_name(evsel); 1091 const char *ev_name = event_name(evsel);
1070 char bf[256], unit; 1092 char bf[256], unit;
1093 const char *warn = " ";
1094 size_t printed;
1071 1095
1072 ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED : 1096 ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
1073 HE_COLORSET_NORMAL); 1097 HE_COLORSET_NORMAL);
1074 1098
1075 nr_events = convert_unit(nr_events, &unit); 1099 nr_events = convert_unit(nr_events, &unit);
1076 snprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events, 1100 printed = snprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
1077 unit, unit == ' ' ? "" : " ", ev_name); 1101 unit, unit == ' ' ? "" : " ", ev_name);
1078 slsmg_write_nstring(bf, browser->width); 1102 slsmg_printf("%s", bf);
1103
1104 nr_events = evsel->hists.stats.nr_events[PERF_RECORD_LOST];
1105 if (nr_events != 0) {
1106 menu->lost_events = true;
1107 if (!current_entry)
1108 ui_browser__set_color(browser, HE_COLORSET_TOP);
1109 nr_events = convert_unit(nr_events, &unit);
1110 snprintf(bf, sizeof(bf), ": %ld%c%schunks LOST!", nr_events,
1111 unit, unit == ' ' ? "" : " ");
1112 warn = bf;
1113 }
1114
1115 slsmg_write_nstring(warn, browser->width - printed);
1079 1116
1080 if (current_entry) 1117 if (current_entry)
1081 menu->selection = evsel; 1118 menu->selection = evsel;
@@ -1100,6 +1137,11 @@ static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
1100 switch (key) { 1137 switch (key) {
1101 case K_TIMER: 1138 case K_TIMER:
1102 timer(arg); 1139 timer(arg);
1140
1141 if (!menu->lost_events_warned && menu->lost_events) {
1142 ui_browser__warn_lost_events(&menu->b);
1143 menu->lost_events_warned = true;
1144 }
1103 continue; 1145 continue;
1104 case K_RIGHT: 1146 case K_RIGHT:
1105 case K_ENTER: 1147 case K_ENTER:
@@ -1133,7 +1175,8 @@ browse_hists:
1133 pos = list_entry(pos->node.prev, struct perf_evsel, node); 1175 pos = list_entry(pos->node.prev, struct perf_evsel, node);
1134 goto browse_hists; 1176 goto browse_hists;
1135 case K_ESC: 1177 case K_ESC:
1136 if (!ui__dialog_yesno("Do you really want to exit?")) 1178 if (!ui_browser__dialog_yesno(&menu->b,
1179 "Do you really want to exit?"))
1137 continue; 1180 continue;
1138 /* Fall thru */ 1181 /* Fall thru */
1139 case 'q': 1182 case 'q':
@@ -1145,7 +1188,8 @@ browse_hists:
1145 case K_LEFT: 1188 case K_LEFT:
1146 continue; 1189 continue;
1147 case K_ESC: 1190 case K_ESC:
1148 if (!ui__dialog_yesno("Do you really want to exit?")) 1191 if (!ui_browser__dialog_yesno(&menu->b,
1192 "Do you really want to exit?"))
1149 continue; 1193 continue;
1150 /* Fall thru */ 1194 /* Fall thru */
1151 case 'q': 1195 case 'q':
diff --git a/tools/perf/util/ui/helpline.c b/tools/perf/util/ui/helpline.c
index f36d2ff509ed..6ef3c5691762 100644
--- a/tools/perf/util/ui/helpline.c
+++ b/tools/perf/util/ui/helpline.c
@@ -1,20 +1,28 @@
1#define _GNU_SOURCE 1#define _GNU_SOURCE
2#include <stdio.h> 2#include <stdio.h>
3#include <stdlib.h> 3#include <stdlib.h>
4#include <newt.h> 4#include <string.h>
5 5
6#include "../debug.h" 6#include "../debug.h"
7#include "helpline.h" 7#include "helpline.h"
8#include "ui.h" 8#include "ui.h"
9#include "libslang.h"
9 10
10void ui_helpline__pop(void) 11void ui_helpline__pop(void)
11{ 12{
12 newtPopHelpLine();
13} 13}
14 14
15char ui_helpline__current[512];
16
15void ui_helpline__push(const char *msg) 17void ui_helpline__push(const char *msg)
16{ 18{
17 newtPushHelpLine(msg); 19 const size_t sz = sizeof(ui_helpline__current);
20
21 SLsmg_gotorc(SLtt_Screen_Rows - 1, 0);
22 SLsmg_set_color(0);
23 SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols);
24 SLsmg_refresh();
25 strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0';
18} 26}
19 27
20void ui_helpline__vpush(const char *fmt, va_list ap) 28void ui_helpline__vpush(const char *fmt, va_list ap)
@@ -63,7 +71,7 @@ int ui_helpline__show_help(const char *format, va_list ap)
63 71
64 if (ui_helpline__last_msg[backlog - 1] == '\n') { 72 if (ui_helpline__last_msg[backlog - 1] == '\n') {
65 ui_helpline__puts(ui_helpline__last_msg); 73 ui_helpline__puts(ui_helpline__last_msg);
66 newtRefresh(); 74 SLsmg_refresh();
67 backlog = 0; 75 backlog = 0;
68 } 76 }
69 pthread_mutex_unlock(&ui__lock); 77 pthread_mutex_unlock(&ui__lock);
diff --git a/tools/perf/util/ui/helpline.h b/tools/perf/util/ui/helpline.h
index fdcbc0270acd..7bab6b34e35e 100644
--- a/tools/perf/util/ui/helpline.h
+++ b/tools/perf/util/ui/helpline.h
@@ -11,4 +11,6 @@ void ui_helpline__vpush(const char *fmt, va_list ap);
11void ui_helpline__fpush(const char *fmt, ...); 11void ui_helpline__fpush(const char *fmt, ...);
12void ui_helpline__puts(const char *msg); 12void ui_helpline__puts(const char *msg);
13 13
14extern char ui_helpline__current[];
15
14#endif /* _PERF_UI_HELPLINE_H_ */ 16#endif /* _PERF_UI_HELPLINE_H_ */
diff --git a/tools/perf/util/ui/progress.c b/tools/perf/util/ui/progress.c
index d7fc399d36b3..295e366b6311 100644
--- a/tools/perf/util/ui/progress.c
+++ b/tools/perf/util/ui/progress.c
@@ -1,60 +1,29 @@
1#include <stdlib.h>
2#include <newt.h>
3#include "../cache.h" 1#include "../cache.h"
4#include "progress.h" 2#include "progress.h"
3#include "libslang.h"
4#include "ui.h"
5#include "browser.h"
5 6
6struct ui_progress { 7void ui_progress__update(u64 curr, u64 total, const char *title)
7 newtComponent form, scale;
8};
9
10struct ui_progress *ui_progress__new(const char *title, u64 total)
11{
12 struct ui_progress *self = malloc(sizeof(*self));
13
14 if (self != NULL) {
15 int cols;
16
17 if (use_browser <= 0)
18 return self;
19 newtGetScreenSize(&cols, NULL);
20 cols -= 4;
21 newtCenteredWindow(cols, 1, title);
22 self->form = newtForm(NULL, NULL, 0);
23 if (self->form == NULL)
24 goto out_free_self;
25 self->scale = newtScale(0, 0, cols, total);
26 if (self->scale == NULL)
27 goto out_free_form;
28 newtFormAddComponent(self->form, self->scale);
29 newtRefresh();
30 }
31
32 return self;
33
34out_free_form:
35 newtFormDestroy(self->form);
36out_free_self:
37 free(self);
38 return NULL;
39}
40
41void ui_progress__update(struct ui_progress *self, u64 curr)
42{ 8{
9 int bar, y;
43 /* 10 /*
44 * FIXME: We should have a per UI backend way of showing progress, 11 * FIXME: We should have a per UI backend way of showing progress,
45 * stdio will just show a percentage as NN%, etc. 12 * stdio will just show a percentage as NN%, etc.
46 */ 13 */
47 if (use_browser <= 0) 14 if (use_browser <= 0)
48 return; 15 return;
49 newtScaleSet(self->scale, curr);
50 newtRefresh();
51}
52 16
53void ui_progress__delete(struct ui_progress *self) 17 ui__refresh_dimensions(true);
54{ 18 pthread_mutex_lock(&ui__lock);
55 if (use_browser > 0) { 19 y = SLtt_Screen_Rows / 2 - 2;
56 newtFormDestroy(self->form); 20 SLsmg_set_color(0);
57 newtPopWindow(); 21 SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols);
58 } 22 SLsmg_gotorc(y++, 1);
59 free(self); 23 SLsmg_write_string((char *)title);
24 SLsmg_set_color(HE_COLORSET_SELECTED);
25 bar = ((SLtt_Screen_Cols - 2) * curr) / total;
26 SLsmg_fill_region(y, 1, 1, bar, ' ');
27 SLsmg_refresh();
28 pthread_mutex_unlock(&ui__lock);
60} 29}
diff --git a/tools/perf/util/ui/progress.h b/tools/perf/util/ui/progress.h
index a3820a0beb5b..d9c205b59aa1 100644
--- a/tools/perf/util/ui/progress.h
+++ b/tools/perf/util/ui/progress.h
@@ -1,11 +1,8 @@
1#ifndef _PERF_UI_PROGRESS_H_ 1#ifndef _PERF_UI_PROGRESS_H_
2#define _PERF_UI_PROGRESS_H_ 1 2#define _PERF_UI_PROGRESS_H_ 1
3 3
4struct ui_progress; 4#include <../types.h>
5 5
6struct ui_progress *ui_progress__new(const char *title, u64 total); 6void ui_progress__update(u64 curr, u64 total, const char *title);
7void ui_progress__delete(struct ui_progress *self);
8
9void ui_progress__update(struct ui_progress *self, u64 curr);
10 7
11#endif 8#endif
diff --git a/tools/perf/util/ui/setup.c b/tools/perf/util/ui/setup.c
index 1e6ba06980c4..85a69faa09aa 100644
--- a/tools/perf/util/ui/setup.c
+++ b/tools/perf/util/ui/setup.c
@@ -7,10 +7,85 @@
7#include "browser.h" 7#include "browser.h"
8#include "helpline.h" 8#include "helpline.h"
9#include "ui.h" 9#include "ui.h"
10#include "util.h"
10#include "libslang.h" 11#include "libslang.h"
12#include "keysyms.h"
11 13
12pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER; 14pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
13 15
16static volatile int ui__need_resize;
17
18void ui__refresh_dimensions(bool force)
19{
20 if (force || ui__need_resize) {
21 ui__need_resize = 0;
22 pthread_mutex_lock(&ui__lock);
23 SLtt_get_screen_size();
24 SLsmg_reinit_smg();
25 pthread_mutex_unlock(&ui__lock);
26 }
27}
28
29static void ui__sigwinch(int sig __used)
30{
31 ui__need_resize = 1;
32}
33
34static void ui__setup_sigwinch(void)
35{
36 static bool done;
37
38 if (done)
39 return;
40
41 done = true;
42 pthread__unblock_sigwinch();
43 signal(SIGWINCH, ui__sigwinch);
44}
45
46int ui__getch(int delay_secs)
47{
48 struct timeval timeout, *ptimeout = delay_secs ? &timeout : NULL;
49 fd_set read_set;
50 int err, key;
51
52 ui__setup_sigwinch();
53
54 FD_ZERO(&read_set);
55 FD_SET(0, &read_set);
56
57 if (delay_secs) {
58 timeout.tv_sec = delay_secs;
59 timeout.tv_usec = 0;
60 }
61
62 err = select(1, &read_set, NULL, NULL, ptimeout);
63
64 if (err == 0)
65 return K_TIMER;
66
67 if (err == -1) {
68 if (errno == EINTR)
69 return K_RESIZE;
70 return K_ERROR;
71 }
72
73 key = SLang_getkey();
74 if (key != K_ESC)
75 return key;
76
77 FD_ZERO(&read_set);
78 FD_SET(0, &read_set);
79 timeout.tv_sec = 0;
80 timeout.tv_usec = 20;
81 err = select(1, &read_set, NULL, NULL, &timeout);
82 if (err == 0)
83 return K_ESC;
84
85 SLang_ungetkey(key);
86 return SLkp_getkey();
87}
88
14static void newt_suspend(void *d __used) 89static void newt_suspend(void *d __used)
15{ 90{
16 newtSuspend(); 91 newtSuspend();
@@ -71,10 +146,10 @@ void setup_browser(bool fallback_to_pager)
71void exit_browser(bool wait_for_ok) 146void exit_browser(bool wait_for_ok)
72{ 147{
73 if (use_browser > 0) { 148 if (use_browser > 0) {
74 if (wait_for_ok) { 149 if (wait_for_ok)
75 char title[] = "Fatal Error", ok[] = "Ok"; 150 ui__question_window("Fatal Error",
76 newtWinMessage(title, ok, ui_helpline__last_msg); 151 ui_helpline__last_msg,
77 } 152 "Press any key...", 0);
78 ui__exit(); 153 ui__exit();
79 } 154 }
80} 155}
diff --git a/tools/perf/util/ui/ui.h b/tools/perf/util/ui/ui.h
index d264e059c829..7b67045479f6 100644
--- a/tools/perf/util/ui/ui.h
+++ b/tools/perf/util/ui/ui.h
@@ -2,7 +2,10 @@
2#define _PERF_UI_H_ 1 2#define _PERF_UI_H_ 1
3 3
4#include <pthread.h> 4#include <pthread.h>
5#include <stdbool.h>
5 6
6extern pthread_mutex_t ui__lock; 7extern pthread_mutex_t ui__lock;
7 8
9void ui__refresh_dimensions(bool force);
10
8#endif /* _PERF_UI_H_ */ 11#endif /* _PERF_UI_H_ */
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c
index fdf1fc8f08bc..45daa7c41dad 100644
--- a/tools/perf/util/ui/util.c
+++ b/tools/perf/util/ui/util.c
@@ -1,6 +1,5 @@
1#include <newt.h> 1#include "../util.h"
2#include <signal.h> 2#include <signal.h>
3#include <stdio.h>
4#include <stdbool.h> 3#include <stdbool.h>
5#include <string.h> 4#include <string.h>
6#include <sys/ttydefaults.h> 5#include <sys/ttydefaults.h>
@@ -8,72 +7,75 @@
8#include "../cache.h" 7#include "../cache.h"
9#include "../debug.h" 8#include "../debug.h"
10#include "browser.h" 9#include "browser.h"
10#include "keysyms.h"
11#include "helpline.h" 11#include "helpline.h"
12#include "ui.h" 12#include "ui.h"
13#include "util.h" 13#include "util.h"
14#include "libslang.h"
14 15
15static void newt_form__set_exit_keys(newtComponent self) 16static void ui_browser__argv_write(struct ui_browser *browser,
17 void *entry, int row)
16{ 18{
17 newtFormAddHotKey(self, NEWT_KEY_LEFT); 19 char **arg = entry;
18 newtFormAddHotKey(self, NEWT_KEY_ESCAPE); 20 bool current_entry = ui_browser__is_current_entry(browser, row);
19 newtFormAddHotKey(self, 'Q');
20 newtFormAddHotKey(self, 'q');
21 newtFormAddHotKey(self, CTRL('c'));
22}
23 21
24static newtComponent newt_form__new(void) 22 ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
25{ 23 HE_COLORSET_NORMAL);
26 newtComponent self = newtForm(NULL, NULL, 0); 24 slsmg_write_nstring(*arg, browser->width);
27 if (self)
28 newt_form__set_exit_keys(self);
29 return self;
30} 25}
31 26
32int ui__popup_menu(int argc, char * const argv[]) 27static int popup_menu__run(struct ui_browser *menu)
33{ 28{
34 struct newtExitStruct es; 29 int key;
35 int i, rc = -1, max_len = 5;
36 newtComponent listbox, form = newt_form__new();
37 30
38 if (form == NULL) 31 if (ui_browser__show(menu, " ", "ESC: exit, ENTER|->: Select option") < 0)
39 return -1; 32 return -1;
40 33
41 listbox = newtListbox(0, 0, argc, NEWT_FLAG_RETURNEXIT); 34 while (1) {
42 if (listbox == NULL) 35 key = ui_browser__run(menu, 0);
43 goto out_destroy_form;
44 36
45 newtFormAddComponent(form, listbox); 37 switch (key) {
38 case K_RIGHT:
39 case K_ENTER:
40 key = menu->index;
41 break;
42 case K_LEFT:
43 case K_ESC:
44 case 'q':
45 case CTRL('c'):
46 key = -1;
47 break;
48 default:
49 continue;
50 }
46 51
47 for (i = 0; i < argc; ++i) { 52 break;
48 int len = strlen(argv[i]);
49 if (len > max_len)
50 max_len = len;
51 if (newtListboxAddEntry(listbox, argv[i], (void *)(long)i))
52 goto out_destroy_form;
53 } 53 }
54 54
55 newtCenteredWindow(max_len, argc, NULL); 55 ui_browser__hide(menu);
56 newtFormRun(form, &es); 56 return key;
57 rc = newtListboxGetCurrent(listbox) - NULL;
58 if (es.reason == NEWT_EXIT_HOTKEY)
59 rc = -1;
60 newtPopWindow();
61out_destroy_form:
62 newtFormDestroy(form);
63 return rc;
64} 57}
65 58
66int ui__help_window(const char *text) 59int ui__popup_menu(int argc, char * const argv[])
67{ 60{
68 struct newtExitStruct es; 61 struct ui_browser menu = {
69 newtComponent tb, form = newt_form__new(); 62 .entries = (void *)argv,
70 int rc = -1; 63 .refresh = ui_browser__argv_refresh,
64 .seek = ui_browser__argv_seek,
65 .write = ui_browser__argv_write,
66 .nr_entries = argc,
67 };
68
69 return popup_menu__run(&menu);
70}
71
72int ui__question_window(const char *title, const char *text,
73 const char *exit_msg, int delay_secs)
74{
75 int x, y;
71 int max_len = 0, nr_lines = 0; 76 int max_len = 0, nr_lines = 0;
72 const char *t; 77 const char *t;
73 78
74 if (form == NULL)
75 return -1;
76
77 t = text; 79 t = text;
78 while (1) { 80 while (1) {
79 const char *sep = strchr(t, '\n'); 81 const char *sep = strchr(t, '\n');
@@ -90,41 +92,77 @@ int ui__help_window(const char *text)
90 t = sep + 1; 92 t = sep + 1;
91 } 93 }
92 94
93 tb = newtTextbox(0, 0, max_len, nr_lines, 0); 95 max_len += 2;
94 if (tb == NULL) 96 nr_lines += 4;
95 goto out_destroy_form; 97 y = SLtt_Screen_Rows / 2 - nr_lines / 2,
96 98 x = SLtt_Screen_Cols / 2 - max_len / 2;
97 newtTextboxSetText(tb, text); 99
98 newtFormAddComponent(form, tb); 100 SLsmg_set_color(0);
99 newtCenteredWindow(max_len, nr_lines, NULL); 101 SLsmg_draw_box(y, x++, nr_lines, max_len);
100 newtFormRun(form, &es); 102 if (title) {
101 newtPopWindow(); 103 SLsmg_gotorc(y, x + 1);
102 rc = 0; 104 SLsmg_write_string((char *)title);
103out_destroy_form: 105 }
104 newtFormDestroy(form); 106 SLsmg_gotorc(++y, x);
105 return rc; 107 nr_lines -= 2;
108 max_len -= 2;
109 SLsmg_write_wrapped_string((unsigned char *)text, y, x,
110 nr_lines, max_len, 1);
111 SLsmg_gotorc(y + nr_lines - 2, x);
112 SLsmg_write_nstring((char *)" ", max_len);
113 SLsmg_gotorc(y + nr_lines - 1, x);
114 SLsmg_write_nstring((char *)exit_msg, max_len);
115 SLsmg_refresh();
116 return ui__getch(delay_secs);
106} 117}
107 118
108static const char yes[] = "Yes", no[] = "No", 119int ui__help_window(const char *text)
109 warning_str[] = "Warning!", ok[] = "Ok"; 120{
121 return ui__question_window("Help", text, "Press any key...", 0);
122}
110 123
111bool ui__dialog_yesno(const char *msg) 124int ui__dialog_yesno(const char *msg)
112{ 125{
113 /* newtWinChoice should really be accepting const char pointers... */ 126 return ui__question_window(NULL, msg, "Enter: Yes, ESC: No", 0);
114 return newtWinChoice(NULL, (char *)yes, (char *)no, (char *)msg) == 1;
115} 127}
116 128
117void ui__warning(const char *format, ...) 129int __ui__warning(const char *title, const char *format, va_list args)
118{ 130{
119 va_list args; 131 char *s;
132
133 if (use_browser > 0 && vasprintf(&s, format, args) > 0) {
134 int key;
120 135
121 va_start(args, format);
122 if (use_browser > 0) {
123 pthread_mutex_lock(&ui__lock); 136 pthread_mutex_lock(&ui__lock);
124 newtWinMessagev((char *)warning_str, (char *)ok, 137 key = ui__question_window(title, s, "Press any key...", 0);
125 (char *)format, args);
126 pthread_mutex_unlock(&ui__lock); 138 pthread_mutex_unlock(&ui__lock);
127 } else 139 free(s);
128 vfprintf(stderr, format, args); 140 return key;
141 }
142
143 fprintf(stderr, "%s:\n", title);
144 vfprintf(stderr, format, args);
145 return K_ESC;
146}
147
148int ui__warning(const char *format, ...)
149{
150 int key;
151 va_list args;
152
153 va_start(args, format);
154 key = __ui__warning("Warning", format, args);
155 va_end(args);
156 return key;
157}
158
159int ui__error(const char *format, ...)
160{
161 int key;
162 va_list args;
163
164 va_start(args, format);
165 key = __ui__warning("Error", format, args);
129 va_end(args); 166 va_end(args);
167 return key;
130} 168}
diff --git a/tools/perf/util/ui/util.h b/tools/perf/util/ui/util.h
index afcbc1d99531..2d1738bd71c8 100644
--- a/tools/perf/util/ui/util.h
+++ b/tools/perf/util/ui/util.h
@@ -1,10 +1,14 @@
1#ifndef _PERF_UI_UTIL_H_ 1#ifndef _PERF_UI_UTIL_H_
2#define _PERF_UI_UTIL_H_ 1 2#define _PERF_UI_UTIL_H_ 1
3 3
4#include <stdbool.h> 4#include <stdarg.h>
5 5
6int ui__getch(int delay_secs);
6int ui__popup_menu(int argc, char * const argv[]); 7int ui__popup_menu(int argc, char * const argv[]);
7int ui__help_window(const char *text); 8int ui__help_window(const char *text);
8bool ui__dialog_yesno(const char *msg); 9int ui__dialog_yesno(const char *msg);
10int ui__question_window(const char *title, const char *text,
11 const char *exit_msg, int delay_secs);
12int __ui__warning(const char *title, const char *format, va_list args);
9 13
10#endif /* _PERF_UI_UTIL_H_ */ 14#endif /* _PERF_UI_UTIL_H_ */
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 8b2d37b59c9e..3c6f7808efae 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -162,19 +162,21 @@ void print_header(void)
162 162
163void dump_cnt(struct counters *cnt) 163void dump_cnt(struct counters *cnt)
164{ 164{
165 fprintf(stderr, "package: %d ", cnt->pkg); 165 if (!cnt)
166 fprintf(stderr, "core:: %d ", cnt->core); 166 return;
167 fprintf(stderr, "CPU: %d ", cnt->cpu); 167 if (cnt->pkg) fprintf(stderr, "package: %d ", cnt->pkg);
168 fprintf(stderr, "TSC: %016llX\n", cnt->tsc); 168 if (cnt->core) fprintf(stderr, "core:: %d ", cnt->core);
169 fprintf(stderr, "c3: %016llX\n", cnt->c3); 169 if (cnt->cpu) fprintf(stderr, "CPU: %d ", cnt->cpu);
170 fprintf(stderr, "c6: %016llX\n", cnt->c6); 170 if (cnt->tsc) fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
171 fprintf(stderr, "c7: %016llX\n", cnt->c7); 171 if (cnt->c3) fprintf(stderr, "c3: %016llX\n", cnt->c3);
172 fprintf(stderr, "aperf: %016llX\n", cnt->aperf); 172 if (cnt->c6) fprintf(stderr, "c6: %016llX\n", cnt->c6);
173 fprintf(stderr, "pc2: %016llX\n", cnt->pc2); 173 if (cnt->c7) fprintf(stderr, "c7: %016llX\n", cnt->c7);
174 fprintf(stderr, "pc3: %016llX\n", cnt->pc3); 174 if (cnt->aperf) fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
175 fprintf(stderr, "pc6: %016llX\n", cnt->pc6); 175 if (cnt->pc2) fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
176 fprintf(stderr, "pc7: %016llX\n", cnt->pc7); 176 if (cnt->pc3) fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
177 fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr); 177 if (cnt->pc6) fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
178 if (cnt->pc7) fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
179 if (cnt->extra_msr) fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
178} 180}
179 181
180void dump_list(struct counters *cnt) 182void dump_list(struct counters *cnt)
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 8d02ccb10c59..30e2befd6f2a 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -42,6 +42,7 @@ $default{"BISECT_MANUAL"} = 0;
42$default{"BISECT_SKIP"} = 1; 42$default{"BISECT_SKIP"} = 1;
43$default{"SUCCESS_LINE"} = "login:"; 43$default{"SUCCESS_LINE"} = "login:";
44$default{"DETECT_TRIPLE_FAULT"} = 1; 44$default{"DETECT_TRIPLE_FAULT"} = 1;
45$default{"NO_INSTALL"} = 0;
45$default{"BOOTED_TIMEOUT"} = 1; 46$default{"BOOTED_TIMEOUT"} = 1;
46$default{"DIE_ON_FAILURE"} = 1; 47$default{"DIE_ON_FAILURE"} = 1;
47$default{"SSH_EXEC"} = "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND"; 48$default{"SSH_EXEC"} = "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND";
@@ -84,6 +85,7 @@ my $grub_number;
84my $target; 85my $target;
85my $make; 86my $make;
86my $post_install; 87my $post_install;
88my $no_install;
87my $noclean; 89my $noclean;
88my $minconfig; 90my $minconfig;
89my $start_minconfig; 91my $start_minconfig;
@@ -115,6 +117,7 @@ my $timeout;
115my $booted_timeout; 117my $booted_timeout;
116my $detect_triplefault; 118my $detect_triplefault;
117my $console; 119my $console;
120my $reboot_success_line;
118my $success_line; 121my $success_line;
119my $stop_after_success; 122my $stop_after_success;
120my $stop_after_failure; 123my $stop_after_failure;
@@ -130,6 +133,12 @@ my %config_help;
130my %variable; 133my %variable;
131my %force_config; 134my %force_config;
132 135
136# do not force reboots on config problems
137my $no_reboot = 1;
138
139# default variables that can be used
140chomp ($variable{"PWD"} = `pwd`);
141
133$config_help{"MACHINE"} = << "EOF" 142$config_help{"MACHINE"} = << "EOF"
134 The machine hostname that you will test. 143 The machine hostname that you will test.
135EOF 144EOF
@@ -241,6 +250,7 @@ sub read_yn {
241 250
242sub get_ktest_config { 251sub get_ktest_config {
243 my ($config) = @_; 252 my ($config) = @_;
253 my $ans;
244 254
245 return if (defined($opt{$config})); 255 return if (defined($opt{$config}));
246 256
@@ -254,16 +264,17 @@ sub get_ktest_config {
254 if (defined($default{$config})) { 264 if (defined($default{$config})) {
255 print "\[$default{$config}\] "; 265 print "\[$default{$config}\] ";
256 } 266 }
257 $entered_configs{$config} = <STDIN>; 267 $ans = <STDIN>;
258 $entered_configs{$config} =~ s/^\s*(.*\S)\s*$/$1/; 268 $ans =~ s/^\s*(.*\S)\s*$/$1/;
259 if ($entered_configs{$config} =~ /^\s*$/) { 269 if ($ans =~ /^\s*$/) {
260 if ($default{$config}) { 270 if ($default{$config}) {
261 $entered_configs{$config} = $default{$config}; 271 $ans = $default{$config};
262 } else { 272 } else {
263 print "Your answer can not be blank\n"; 273 print "Your answer can not be blank\n";
264 next; 274 next;
265 } 275 }
266 } 276 }
277 $entered_configs{$config} = process_variables($ans);
267 last; 278 last;
268 } 279 }
269} 280}
@@ -298,7 +309,7 @@ sub get_ktest_configs {
298} 309}
299 310
300sub process_variables { 311sub process_variables {
301 my ($value) = @_; 312 my ($value, $remove_undef) = @_;
302 my $retval = ""; 313 my $retval = "";
303 314
304 # We want to check for '\', and it is just easier 315 # We want to check for '\', and it is just easier
@@ -316,6 +327,10 @@ sub process_variables {
316 $retval = "$retval$begin"; 327 $retval = "$retval$begin";
317 if (defined($variable{$var})) { 328 if (defined($variable{$var})) {
318 $retval = "$retval$variable{$var}"; 329 $retval = "$retval$variable{$var}";
330 } elsif (defined($remove_undef) && $remove_undef) {
331 # for if statements, any variable that is not defined,
332 # we simple convert to 0
333 $retval = "${retval}0";
319 } else { 334 } else {
320 # put back the origin piece. 335 # put back the origin piece.
321 $retval = "$retval\$\{$var\}"; 336 $retval = "$retval\$\{$var\}";
@@ -331,10 +346,17 @@ sub process_variables {
331} 346}
332 347
333sub set_value { 348sub set_value {
334 my ($lvalue, $rvalue) = @_; 349 my ($lvalue, $rvalue, $override, $overrides, $name) = @_;
335 350
336 if (defined($opt{$lvalue})) { 351 if (defined($opt{$lvalue})) {
337 die "Error: Option $lvalue defined more than once!\n"; 352 if (!$override || defined(${$overrides}{$lvalue})) {
353 my $extra = "";
354 if ($override) {
355 $extra = "In the same override section!\n";
356 }
357 die "$name: $.: Option $lvalue defined more than once!\n$extra";
358 }
359 ${$overrides}{$lvalue} = $rvalue;
338 } 360 }
339 if ($rvalue =~ /^\s*$/) { 361 if ($rvalue =~ /^\s*$/) {
340 delete $opt{$lvalue}; 362 delete $opt{$lvalue};
@@ -355,86 +377,274 @@ sub set_variable {
355 } 377 }
356} 378}
357 379
358sub read_config { 380sub process_compare {
359 my ($config) = @_; 381 my ($lval, $cmp, $rval) = @_;
382
383 # remove whitespace
384
385 $lval =~ s/^\s*//;
386 $lval =~ s/\s*$//;
387
388 $rval =~ s/^\s*//;
389 $rval =~ s/\s*$//;
390
391 if ($cmp eq "==") {
392 return $lval eq $rval;
393 } elsif ($cmp eq "!=") {
394 return $lval ne $rval;
395 }
396
397 my $statement = "$lval $cmp $rval";
398 my $ret = eval $statement;
399
400 # $@ stores error of eval
401 if ($@) {
402 return -1;
403 }
404
405 return $ret;
406}
407
408sub value_defined {
409 my ($val) = @_;
410
411 return defined($variable{$2}) ||
412 defined($opt{$2});
413}
414
415my $d = 0;
416sub process_expression {
417 my ($name, $val) = @_;
418
419 my $c = $d++;
420
421 while ($val =~ s/\(([^\(]*?)\)/\&\&\&\&VAL\&\&\&\&/) {
422 my $express = $1;
423
424 if (process_expression($name, $express)) {
425 $val =~ s/\&\&\&\&VAL\&\&\&\&/ 1 /;
426 } else {
427 $val =~ s/\&\&\&\&VAL\&\&\&\&/ 0 /;
428 }
429 }
430
431 $d--;
432 my $OR = "\\|\\|";
433 my $AND = "\\&\\&";
434
435 while ($val =~ s/^(.*?)($OR|$AND)//) {
436 my $express = $1;
437 my $op = $2;
438
439 if (process_expression($name, $express)) {
440 if ($op eq "||") {
441 return 1;
442 }
443 } else {
444 if ($op eq "&&") {
445 return 0;
446 }
447 }
448 }
449
450 if ($val =~ /(.*)(==|\!=|>=|<=|>|<)(.*)/) {
451 my $ret = process_compare($1, $2, $3);
452 if ($ret < 0) {
453 die "$name: $.: Unable to process comparison\n";
454 }
455 return $ret;
456 }
457
458 if ($val =~ /^\s*(NOT\s*)?DEFINED\s+(\S+)\s*$/) {
459 if (defined $1) {
460 return !value_defined($2);
461 } else {
462 return value_defined($2);
463 }
464 }
465
466 if ($val =~ /^\s*0\s*$/) {
467 return 0;
468 } elsif ($val =~ /^\s*\d+\s*$/) {
469 return 1;
470 }
471
472 die ("$name: $.: Undefined content $val in if statement\n");
473}
474
475sub process_if {
476 my ($name, $value) = @_;
477
478 # Convert variables and replace undefined ones with 0
479 my $val = process_variables($value, 1);
480 my $ret = process_expression $name, $val;
481
482 return $ret;
483}
360 484
361 open(IN, $config) || die "can't read file $config"; 485sub __read_config {
486 my ($config, $current_test_num) = @_;
487
488 my $in;
489 open($in, $config) || die "can't read file $config";
362 490
363 my $name = $config; 491 my $name = $config;
364 $name =~ s,.*/(.*),$1,; 492 $name =~ s,.*/(.*),$1,;
365 493
366 my $test_num = 0; 494 my $test_num = $$current_test_num;
367 my $default = 1; 495 my $default = 1;
368 my $repeat = 1; 496 my $repeat = 1;
369 my $num_tests_set = 0; 497 my $num_tests_set = 0;
370 my $skip = 0; 498 my $skip = 0;
371 my $rest; 499 my $rest;
500 my $line;
372 my $test_case = 0; 501 my $test_case = 0;
502 my $if = 0;
503 my $if_set = 0;
504 my $override = 0;
373 505
374 while (<IN>) { 506 my %overrides;
507
508 while (<$in>) {
375 509
376 # ignore blank lines and comments 510 # ignore blank lines and comments
377 next if (/^\s*$/ || /\s*\#/); 511 next if (/^\s*$/ || /\s*\#/);
378 512
379 if (/^\s*TEST_START(.*)/) { 513 if (/^\s*(TEST_START|DEFAULTS)\b(.*)/) {
380 514
381 $rest = $1; 515 my $type = $1;
516 $rest = $2;
517 $line = $2;
382 518
383 if ($num_tests_set) { 519 my $old_test_num;
384 die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n"; 520 my $old_repeat;
385 } 521 $override = 0;
522
523 if ($type eq "TEST_START") {
524
525 if ($num_tests_set) {
526 die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
527 }
386 528
387 my $old_test_num = $test_num; 529 $old_test_num = $test_num;
388 my $old_repeat = $repeat; 530 $old_repeat = $repeat;
389 531
390 $test_num += $repeat; 532 $test_num += $repeat;
391 $default = 0; 533 $default = 0;
392 $repeat = 1; 534 $repeat = 1;
535 } else {
536 $default = 1;
537 }
393 538
394 if ($rest =~ /\s+SKIP(.*)/) { 539 # If SKIP is anywhere in the line, the command will be skipped
395 $rest = $1; 540 if ($rest =~ s/\s+SKIP\b//) {
396 $skip = 1; 541 $skip = 1;
397 } else { 542 } else {
398 $test_case = 1; 543 $test_case = 1;
399 $skip = 0; 544 $skip = 0;
400 } 545 }
401 546
402 if ($rest =~ /\s+ITERATE\s+(\d+)(.*)$/) { 547 if ($rest =~ s/\sELSE\b//) {
403 $repeat = $1; 548 if (!$if) {
404 $rest = $2; 549 die "$name: $.: ELSE found with out matching IF section\n$_";
405 $repeat_tests{"$test_num"} = $repeat; 550 }
551 $if = 0;
552
553 if ($if_set) {
554 $skip = 1;
555 } else {
556 $skip = 0;
557 }
406 } 558 }
407 559
408 if ($rest =~ /\s+SKIP(.*)/) { 560 if ($rest =~ s/\sIF\s+(.*)//) {
409 $rest = $1; 561 if (process_if($name, $1)) {
410 $skip = 1; 562 $if_set = 1;
563 } else {
564 $skip = 1;
565 }
566 $if = 1;
567 } else {
568 $if = 0;
569 $if_set = 0;
411 } 570 }
412 571
413 if ($rest !~ /^\s*$/) { 572 if (!$skip) {
414 die "$name: $.: Gargbage found after TEST_START\n$_"; 573 if ($type eq "TEST_START") {
574 if ($rest =~ s/\s+ITERATE\s+(\d+)//) {
575 $repeat = $1;
576 $repeat_tests{"$test_num"} = $repeat;
577 }
578 } elsif ($rest =~ s/\sOVERRIDE\b//) {
579 # DEFAULT only
580 $override = 1;
581 # Clear previous overrides
582 %overrides = ();
583 }
584 }
585
586 if (!$skip && $rest !~ /^\s*$/) {
587 die "$name: $.: Gargbage found after $type\n$_";
415 } 588 }
416 589
417 if ($skip) { 590 if ($skip && $type eq "TEST_START") {
418 $test_num = $old_test_num; 591 $test_num = $old_test_num;
419 $repeat = $old_repeat; 592 $repeat = $old_repeat;
420 } 593 }
421 594
422 } elsif (/^\s*DEFAULTS(.*)$/) { 595 } elsif (/^\s*ELSE\b(.*)$/) {
423 $default = 1; 596 if (!$if) {
424 597 die "$name: $.: ELSE found with out matching IF section\n$_";
598 }
425 $rest = $1; 599 $rest = $1;
426 600 if ($if_set) {
427 if ($rest =~ /\s+SKIP(.*)/) {
428 $rest = $1;
429 $skip = 1; 601 $skip = 1;
602 $rest = "";
430 } else { 603 } else {
431 $skip = 0; 604 $skip = 0;
605
606 if ($rest =~ /\sIF\s+(.*)/) {
607 # May be a ELSE IF section.
608 if (!process_if($name, $1)) {
609 $skip = 1;
610 }
611 $rest = "";
612 } else {
613 $if = 0;
614 }
432 } 615 }
433 616
434 if ($rest !~ /^\s*$/) { 617 if ($rest !~ /^\s*$/) {
435 die "$name: $.: Gargbage found after DEFAULTS\n$_"; 618 die "$name: $.: Gargbage found after DEFAULTS\n$_";
436 } 619 }
437 620
621 } elsif (/^\s*INCLUDE\s+(\S+)/) {
622
623 next if ($skip);
624
625 if (!$default) {
626 die "$name: $.: INCLUDE can only be done in default sections\n$_";
627 }
628
629 my $file = process_variables($1);
630
631 if ($file !~ m,^/,) {
632 # check the path of the config file first
633 if ($config =~ m,(.*)/,) {
634 if (-f "$1/$file") {
635 $file = "$1/$file";
636 }
637 }
638 }
639
640 if ( ! -r $file ) {
641 die "$name: $.: Can't read file $file\n$_";
642 }
643
644 if (__read_config($file, \$test_num)) {
645 $test_case = 1;
646 }
647
438 } elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) { 648 } elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) {
439 649
440 next if ($skip); 650 next if ($skip);
@@ -460,10 +670,10 @@ sub read_config {
460 } 670 }
461 671
462 if ($default || $lvalue =~ /\[\d+\]$/) { 672 if ($default || $lvalue =~ /\[\d+\]$/) {
463 set_value($lvalue, $rvalue); 673 set_value($lvalue, $rvalue, $override, \%overrides, $name);
464 } else { 674 } else {
465 my $val = "$lvalue\[$test_num\]"; 675 my $val = "$lvalue\[$test_num\]";
466 set_value($val, $rvalue); 676 set_value($val, $rvalue, $override, \%overrides, $name);
467 677
468 if ($repeat > 1) { 678 if ($repeat > 1) {
469 $repeats{$val} = $repeat; 679 $repeats{$val} = $repeat;
@@ -490,13 +700,26 @@ sub read_config {
490 } 700 }
491 } 701 }
492 702
493 close(IN);
494
495 if ($test_num) { 703 if ($test_num) {
496 $test_num += $repeat - 1; 704 $test_num += $repeat - 1;
497 $opt{"NUM_TESTS"} = $test_num; 705 $opt{"NUM_TESTS"} = $test_num;
498 } 706 }
499 707
708 close($in);
709
710 $$current_test_num = $test_num;
711
712 return $test_case;
713}
714
715sub read_config {
716 my ($config) = @_;
717
718 my $test_case;
719 my $test_num = 0;
720
721 $test_case = __read_config $config, \$test_num;
722
500 # make sure we have all mandatory configs 723 # make sure we have all mandatory configs
501 get_ktest_configs; 724 get_ktest_configs;
502 725
@@ -603,8 +826,20 @@ sub doprint {
603} 826}
604 827
605sub run_command; 828sub run_command;
829sub start_monitor;
830sub end_monitor;
831sub wait_for_monitor;
606 832
607sub reboot { 833sub reboot {
834 my ($time) = @_;
835
836 if (defined($time)) {
837 start_monitor;
838 # flush out current monitor
839 # May contain the reboot success line
840 wait_for_monitor 1;
841 }
842
608 # try to reboot normally 843 # try to reboot normally
609 if (run_command $reboot) { 844 if (run_command $reboot) {
610 if (defined($powercycle_after_reboot)) { 845 if (defined($powercycle_after_reboot)) {
@@ -615,12 +850,17 @@ sub reboot {
615 # nope? power cycle it. 850 # nope? power cycle it.
616 run_command "$power_cycle"; 851 run_command "$power_cycle";
617 } 852 }
853
854 if (defined($time)) {
855 wait_for_monitor($time, $reboot_success_line);
856 end_monitor;
857 }
618} 858}
619 859
620sub do_not_reboot { 860sub do_not_reboot {
621 my $i = $iteration; 861 my $i = $iteration;
622 862
623 return $test_type eq "build" || 863 return $test_type eq "build" || $no_reboot ||
624 ($test_type eq "patchcheck" && $opt{"PATCHCHECK_TYPE[$i]"} eq "build") || 864 ($test_type eq "patchcheck" && $opt{"PATCHCHECK_TYPE[$i]"} eq "build") ||
625 ($test_type eq "bisect" && $opt{"BISECT_TYPE[$i]"} eq "build"); 865 ($test_type eq "bisect" && $opt{"BISECT_TYPE[$i]"} eq "build");
626} 866}
@@ -693,16 +933,29 @@ sub end_monitor {
693} 933}
694 934
695sub wait_for_monitor { 935sub wait_for_monitor {
696 my ($time) = @_; 936 my ($time, $stop) = @_;
937 my $full_line = "";
697 my $line; 938 my $line;
939 my $booted = 0;
698 940
699 doprint "** Wait for monitor to settle down **\n"; 941 doprint "** Wait for monitor to settle down **\n";
700 942
701 # read the monitor and wait for the system to calm down 943 # read the monitor and wait for the system to calm down
702 do { 944 while (!$booted) {
703 $line = wait_for_input($monitor_fp, $time); 945 $line = wait_for_input($monitor_fp, $time);
704 print "$line" if (defined($line)); 946 last if (!defined($line));
705 } while (defined($line)); 947 print "$line";
948 $full_line .= $line;
949
950 if (defined($stop) && $full_line =~ /$stop/) {
951 doprint "wait for monitor detected $stop\n";
952 $booted = 1;
953 }
954
955 if ($line =~ /\n/) {
956 $full_line = "";
957 }
958 }
706 print "** Monitor flushed **\n"; 959 print "** Monitor flushed **\n";
707} 960}
708 961
@@ -719,10 +972,7 @@ sub fail {
719 # no need to reboot for just building. 972 # no need to reboot for just building.
720 if (!do_not_reboot) { 973 if (!do_not_reboot) {
721 doprint "REBOOTING\n"; 974 doprint "REBOOTING\n";
722 reboot; 975 reboot $sleep_time;
723 start_monitor;
724 wait_for_monitor $sleep_time;
725 end_monitor;
726 } 976 }
727 977
728 my $name = ""; 978 my $name = "";
@@ -854,9 +1104,12 @@ sub get_grub_index {
854 open(IN, "$ssh_grub |") 1104 open(IN, "$ssh_grub |")
855 or die "unable to get menu.lst"; 1105 or die "unable to get menu.lst";
856 1106
1107 my $found = 0;
1108
857 while (<IN>) { 1109 while (<IN>) {
858 if (/^\s*title\s+$grub_menu\s*$/) { 1110 if (/^\s*title\s+$grub_menu\s*$/) {
859 $grub_number++; 1111 $grub_number++;
1112 $found = 1;
860 last; 1113 last;
861 } elsif (/^\s*title\s/) { 1114 } elsif (/^\s*title\s/) {
862 $grub_number++; 1115 $grub_number++;
@@ -865,7 +1118,7 @@ sub get_grub_index {
865 close(IN); 1118 close(IN);
866 1119
867 die "Could not find '$grub_menu' in /boot/grub/menu on $machine" 1120 die "Could not find '$grub_menu' in /boot/grub/menu on $machine"
868 if ($grub_number < 0); 1121 if (!$found);
869 doprint "$grub_number\n"; 1122 doprint "$grub_number\n";
870} 1123}
871 1124
@@ -902,7 +1155,8 @@ sub wait_for_input
902 1155
903sub reboot_to { 1156sub reboot_to {
904 if ($reboot_type eq "grub") { 1157 if ($reboot_type eq "grub") {
905 run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch && reboot)'"; 1158 run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
1159 reboot;
906 return; 1160 return;
907 } 1161 }
908 1162
@@ -1083,6 +1337,8 @@ sub do_post_install {
1083 1337
1084sub install { 1338sub install {
1085 1339
1340 return if ($no_install);
1341
1086 run_scp "$outputdir/$build_target", "$target_image" or 1342 run_scp "$outputdir/$build_target", "$target_image" or
1087 dodie "failed to copy image"; 1343 dodie "failed to copy image";
1088 1344
@@ -1140,6 +1396,11 @@ sub get_version {
1140} 1396}
1141 1397
1142sub start_monitor_and_boot { 1398sub start_monitor_and_boot {
1399 # Make sure the stable kernel has finished booting
1400 start_monitor;
1401 wait_for_monitor 5;
1402 end_monitor;
1403
1143 get_grub_index; 1404 get_grub_index;
1144 get_version; 1405 get_version;
1145 install; 1406 install;
@@ -1250,6 +1511,10 @@ sub build {
1250 1511
1251 unlink $buildlog; 1512 unlink $buildlog;
1252 1513
1514 # Failed builds should not reboot the target
1515 my $save_no_reboot = $no_reboot;
1516 $no_reboot = 1;
1517
1253 if (defined($pre_build)) { 1518 if (defined($pre_build)) {
1254 my $ret = run_command $pre_build; 1519 my $ret = run_command $pre_build;
1255 if (!$ret && defined($pre_build_die) && 1520 if (!$ret && defined($pre_build_die) &&
@@ -1272,15 +1537,15 @@ sub build {
1272 # allow for empty configs 1537 # allow for empty configs
1273 run_command "touch $output_config"; 1538 run_command "touch $output_config";
1274 1539
1275 run_command "mv $output_config $outputdir/config_temp" or 1540 if (!$noclean) {
1276 dodie "moving .config"; 1541 run_command "mv $output_config $outputdir/config_temp" or
1542 dodie "moving .config";
1277 1543
1278 if (!$noclean && !run_command "$make mrproper") { 1544 run_command "$make mrproper" or dodie "make mrproper";
1279 dodie "make mrproper";
1280 }
1281 1545
1282 run_command "mv $outputdir/config_temp $output_config" or 1546 run_command "mv $outputdir/config_temp $output_config" or
1283 dodie "moving config_temp"; 1547 dodie "moving config_temp";
1548 }
1284 1549
1285 } elsif (!$noclean) { 1550 } elsif (!$noclean) {
1286 unlink "$output_config"; 1551 unlink "$output_config";
@@ -1318,10 +1583,15 @@ sub build {
1318 1583
1319 if (!$build_ret) { 1584 if (!$build_ret) {
1320 # bisect may need this to pass 1585 # bisect may need this to pass
1321 return 0 if ($in_bisect); 1586 if ($in_bisect) {
1587 $no_reboot = $save_no_reboot;
1588 return 0;
1589 }
1322 fail "failed build" and return 0; 1590 fail "failed build" and return 0;
1323 } 1591 }
1324 1592
1593 $no_reboot = $save_no_reboot;
1594
1325 return 1; 1595 return 1;
1326} 1596}
1327 1597
@@ -1356,10 +1626,7 @@ sub success {
1356 1626
1357 if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) { 1627 if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) {
1358 doprint "Reboot and wait $sleep_time seconds\n"; 1628 doprint "Reboot and wait $sleep_time seconds\n";
1359 reboot; 1629 reboot $sleep_time;
1360 start_monitor;
1361 wait_for_monitor $sleep_time;
1362 end_monitor;
1363 } 1630 }
1364} 1631}
1365 1632
@@ -1500,10 +1767,7 @@ sub run_git_bisect {
1500 1767
1501sub bisect_reboot { 1768sub bisect_reboot {
1502 doprint "Reboot and sleep $bisect_sleep_time seconds\n"; 1769 doprint "Reboot and sleep $bisect_sleep_time seconds\n";
1503 reboot; 1770 reboot $bisect_sleep_time;
1504 start_monitor;
1505 wait_for_monitor $bisect_sleep_time;
1506 end_monitor;
1507} 1771}
1508 1772
1509# returns 1 on success, 0 on failure, -1 on skip 1773# returns 1 on success, 0 on failure, -1 on skip
@@ -2066,10 +2330,7 @@ sub config_bisect {
2066 2330
2067sub patchcheck_reboot { 2331sub patchcheck_reboot {
2068 doprint "Reboot and sleep $patchcheck_sleep_time seconds\n"; 2332 doprint "Reboot and sleep $patchcheck_sleep_time seconds\n";
2069 reboot; 2333 reboot $patchcheck_sleep_time;
2070 start_monitor;
2071 wait_for_monitor $patchcheck_sleep_time;
2072 end_monitor;
2073} 2334}
2074 2335
2075sub patchcheck { 2336sub patchcheck {
@@ -2178,12 +2439,31 @@ sub patchcheck {
2178} 2439}
2179 2440
2180my %depends; 2441my %depends;
2442my %depcount;
2181my $iflevel = 0; 2443my $iflevel = 0;
2182my @ifdeps; 2444my @ifdeps;
2183 2445
2184# prevent recursion 2446# prevent recursion
2185my %read_kconfigs; 2447my %read_kconfigs;
2186 2448
2449sub add_dep {
2450 # $config depends on $dep
2451 my ($config, $dep) = @_;
2452
2453 if (defined($depends{$config})) {
2454 $depends{$config} .= " " . $dep;
2455 } else {
2456 $depends{$config} = $dep;
2457 }
2458
2459 # record the number of configs depending on $dep
2460 if (defined $depcount{$dep}) {
2461 $depcount{$dep}++;
2462 } else {
2463 $depcount{$dep} = 1;
2464 }
2465}
2466
2187# taken from streamline_config.pl 2467# taken from streamline_config.pl
2188sub read_kconfig { 2468sub read_kconfig {
2189 my ($kconfig) = @_; 2469 my ($kconfig) = @_;
@@ -2230,30 +2510,19 @@ sub read_kconfig {
2230 $config = $2; 2510 $config = $2;
2231 2511
2232 for (my $i = 0; $i < $iflevel; $i++) { 2512 for (my $i = 0; $i < $iflevel; $i++) {
2233 if ($i) { 2513 add_dep $config, $ifdeps[$i];
2234 $depends{$config} .= " " . $ifdeps[$i];
2235 } else {
2236 $depends{$config} = $ifdeps[$i];
2237 }
2238 $state = "DEP";
2239 } 2514 }
2240 2515
2241 # collect the depends for the config 2516 # collect the depends for the config
2242 } elsif ($state eq "NEW" && /^\s*depends\s+on\s+(.*)$/) { 2517 } elsif ($state eq "NEW" && /^\s*depends\s+on\s+(.*)$/) {
2243 2518
2244 if (defined($depends{$1})) { 2519 add_dep $config, $1;
2245 $depends{$config} .= " " . $1;
2246 } else {
2247 $depends{$config} = $1;
2248 }
2249 2520
2250 # Get the configs that select this config 2521 # Get the configs that select this config
2251 } elsif ($state ne "NONE" && /^\s*select\s+(\S+)/) { 2522 } elsif ($state eq "NEW" && /^\s*select\s+(\S+)/) {
2252 if (defined($depends{$1})) { 2523
2253 $depends{$1} .= " " . $config; 2524 # selected by depends on config
2254 } else { 2525 add_dep $1, $config;
2255 $depends{$1} = $config;
2256 }
2257 2526
2258 # Check for if statements 2527 # Check for if statements
2259 } elsif (/^if\s+(.*\S)\s*$/) { 2528 } elsif (/^if\s+(.*\S)\s*$/) {
@@ -2365,11 +2634,18 @@ sub make_new_config {
2365 close OUT; 2634 close OUT;
2366} 2635}
2367 2636
2637sub chomp_config {
2638 my ($config) = @_;
2639
2640 $config =~ s/CONFIG_//;
2641
2642 return $config;
2643}
2644
2368sub get_depends { 2645sub get_depends {
2369 my ($dep) = @_; 2646 my ($dep) = @_;
2370 2647
2371 my $kconfig = $dep; 2648 my $kconfig = chomp_config $dep;
2372 $kconfig =~ s/CONFIG_//;
2373 2649
2374 $dep = $depends{"$kconfig"}; 2650 $dep = $depends{"$kconfig"};
2375 2651
@@ -2419,8 +2695,7 @@ sub test_this_config {
2419 return undef; 2695 return undef;
2420 } 2696 }
2421 2697
2422 my $kconfig = $config; 2698 my $kconfig = chomp_config $config;
2423 $kconfig =~ s/CONFIG_//;
2424 2699
2425 # Test dependencies first 2700 # Test dependencies first
2426 if (defined($depends{"$kconfig"})) { 2701 if (defined($depends{"$kconfig"})) {
@@ -2510,6 +2785,14 @@ sub make_min_config {
2510 2785
2511 my @config_keys = keys %min_configs; 2786 my @config_keys = keys %min_configs;
2512 2787
2788 # All configs need a depcount
2789 foreach my $config (@config_keys) {
2790 my $kconfig = chomp_config $config;
2791 if (!defined $depcount{$kconfig}) {
2792 $depcount{$kconfig} = 0;
2793 }
2794 }
2795
2513 # Remove anything that was set by the make allnoconfig 2796 # Remove anything that was set by the make allnoconfig
2514 # we shouldn't need them as they get set for us anyway. 2797 # we shouldn't need them as they get set for us anyway.
2515 foreach my $config (@config_keys) { 2798 foreach my $config (@config_keys) {
@@ -2548,8 +2831,13 @@ sub make_min_config {
2548 # Now disable each config one by one and do a make oldconfig 2831 # Now disable each config one by one and do a make oldconfig
2549 # till we find a config that changes our list. 2832 # till we find a config that changes our list.
2550 2833
2551 # Put configs that did not modify the config at the end.
2552 my @test_configs = keys %min_configs; 2834 my @test_configs = keys %min_configs;
2835
2836 # Sort keys by who is most dependent on
2837 @test_configs = sort { $depcount{chomp_config($b)} <=> $depcount{chomp_config($a)} }
2838 @test_configs ;
2839
2840 # Put configs that did not modify the config at the end.
2553 my $reset = 1; 2841 my $reset = 1;
2554 for (my $i = 0; $i < $#test_configs; $i++) { 2842 for (my $i = 0; $i < $#test_configs; $i++) {
2555 if (!defined($nochange_config{$test_configs[0]})) { 2843 if (!defined($nochange_config{$test_configs[0]})) {
@@ -2659,10 +2947,7 @@ sub make_min_config {
2659 } 2947 }
2660 2948
2661 doprint "Reboot and wait $sleep_time seconds\n"; 2949 doprint "Reboot and wait $sleep_time seconds\n";
2662 reboot; 2950 reboot $sleep_time;
2663 start_monitor;
2664 wait_for_monitor $sleep_time;
2665 end_monitor;
2666 } 2951 }
2667 2952
2668 success $i; 2953 success $i;
@@ -2783,6 +3068,9 @@ sub set_test_option {
2783# First we need to do is the builds 3068# First we need to do is the builds
2784for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { 3069for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2785 3070
3071 # Do not reboot on failing test options
3072 $no_reboot = 1;
3073
2786 $iteration = $i; 3074 $iteration = $i;
2787 3075
2788 my $makecmd = set_test_option("MAKE_CMD", $i); 3076 my $makecmd = set_test_option("MAKE_CMD", $i);
@@ -2811,6 +3099,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2811 $reboot_type = set_test_option("REBOOT_TYPE", $i); 3099 $reboot_type = set_test_option("REBOOT_TYPE", $i);
2812 $grub_menu = set_test_option("GRUB_MENU", $i); 3100 $grub_menu = set_test_option("GRUB_MENU", $i);
2813 $post_install = set_test_option("POST_INSTALL", $i); 3101 $post_install = set_test_option("POST_INSTALL", $i);
3102 $no_install = set_test_option("NO_INSTALL", $i);
2814 $reboot_script = set_test_option("REBOOT_SCRIPT", $i); 3103 $reboot_script = set_test_option("REBOOT_SCRIPT", $i);
2815 $reboot_on_error = set_test_option("REBOOT_ON_ERROR", $i); 3104 $reboot_on_error = set_test_option("REBOOT_ON_ERROR", $i);
2816 $poweroff_on_error = set_test_option("POWEROFF_ON_ERROR", $i); 3105 $poweroff_on_error = set_test_option("POWEROFF_ON_ERROR", $i);
@@ -2832,6 +3121,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2832 $console = set_test_option("CONSOLE", $i); 3121 $console = set_test_option("CONSOLE", $i);
2833 $detect_triplefault = set_test_option("DETECT_TRIPLE_FAULT", $i); 3122 $detect_triplefault = set_test_option("DETECT_TRIPLE_FAULT", $i);
2834 $success_line = set_test_option("SUCCESS_LINE", $i); 3123 $success_line = set_test_option("SUCCESS_LINE", $i);
3124 $reboot_success_line = set_test_option("REBOOT_SUCCESS_LINE", $i);
2835 $stop_after_success = set_test_option("STOP_AFTER_SUCCESS", $i); 3125 $stop_after_success = set_test_option("STOP_AFTER_SUCCESS", $i);
2836 $stop_after_failure = set_test_option("STOP_AFTER_FAILURE", $i); 3126 $stop_after_failure = set_test_option("STOP_AFTER_FAILURE", $i);
2837 $stop_test_after = set_test_option("STOP_TEST_AFTER", $i); 3127 $stop_test_after = set_test_option("STOP_TEST_AFTER", $i);
@@ -2850,9 +3140,11 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2850 3140
2851 chdir $builddir || die "can't change directory to $builddir"; 3141 chdir $builddir || die "can't change directory to $builddir";
2852 3142
2853 if (!-d $tmpdir) { 3143 foreach my $dir ($tmpdir, $outputdir) {
2854 mkpath($tmpdir) or 3144 if (!-d $dir) {
2855 die "can't create $tmpdir"; 3145 mkpath($dir) or
3146 die "can't create $dir";
3147 }
2856 } 3148 }
2857 3149
2858 $ENV{"SSH_USER"} = $ssh_user; 3150 $ENV{"SSH_USER"} = $ssh_user;
@@ -2889,8 +3181,11 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2889 $run_type = "ERROR"; 3181 $run_type = "ERROR";
2890 } 3182 }
2891 3183
3184 my $installme = "";
3185 $installme = " no_install" if ($no_install);
3186
2892 doprint "\n\n"; 3187 doprint "\n\n";
2893 doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type\n\n"; 3188 doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type$installme\n\n";
2894 3189
2895 unlink $dmesg; 3190 unlink $dmesg;
2896 unlink $buildlog; 3191 unlink $buildlog;
@@ -2911,6 +3206,9 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2911 die "failed to checkout $checkout"; 3206 die "failed to checkout $checkout";
2912 } 3207 }
2913 3208
3209 $no_reboot = 0;
3210
3211
2914 if ($test_type eq "bisect") { 3212 if ($test_type eq "bisect") {
2915 bisect $i; 3213 bisect $i;
2916 next; 3214 next;
@@ -2929,6 +3227,13 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
2929 build $build_type or next; 3227 build $build_type or next;
2930 } 3228 }
2931 3229
3230 if ($test_type eq "install") {
3231 get_version;
3232 install;
3233 success $i;
3234 next;
3235 }
3236
2932 if ($test_type ne "build") { 3237 if ($test_type ne "build") {
2933 my $failed = 0; 3238 my $failed = 0;
2934 start_monitor_and_boot or $failed = 1; 3239 start_monitor_and_boot or $failed = 1;
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index b8bcd14b5a4d..dbedfa196727 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -72,6 +72,128 @@
72# the same option name under the same test or as default 72# the same option name under the same test or as default
73# ktest will fail to execute, and no tests will run. 73# ktest will fail to execute, and no tests will run.
74# 74#
75# DEFAULTS OVERRIDE
76#
77# Options defined in the DEFAULTS section can not be duplicated
78# even if they are defined in two different DEFAULT sections.
79# This is done to catch mistakes where an option is added but
80# the previous option was forgotten about and not commented.
81#
82# The OVERRIDE keyword can be added to a section to allow this
83# section to override other DEFAULT sections values that have
84# been defined previously. It will only override options that
85# have been defined before its use. Options defined later
86# in a non override section will still error. The same option
87# can not be defined in the same section even if that section
88# is marked OVERRIDE.
89#
90#
91#
92# Both TEST_START and DEFAULTS sections can also have the IF keyword
93# The value after the IF must evaluate into a 0 or non 0 positive
94# integer, and can use the config variables (explained below).
95#
96# DEFAULTS IF ${IS_X86_32}
97#
98# The above will process the DEFAULTS section if the config
99# variable IS_X86_32 evaluates to a non zero positive integer
100# otherwise if it evaluates to zero, it will act the same
101# as if the SKIP keyword was used.
102#
103# The ELSE keyword can be used directly after a section with
104# an IF statement.
105#
106# TEST_START IF ${RUN_NET_TESTS}
107# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
108#
109# ELSE
110#
111# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-normal
112#
113#
114# The ELSE keyword can also contain an IF statement to allow multiple
115# if then else sections. But all the sections must be either
116# DEFAULT or TEST_START, they can not be a mixture.
117#
118# TEST_START IF ${RUN_NET_TESTS}
119# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
120#
121# ELSE IF ${RUN_DISK_TESTS}
122# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-tests
123#
124# ELSE IF ${RUN_CPU_TESTS}
125# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-cpu
126#
127# ELSE
128# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
129#
130# The if statement may also have comparisons. For the
131# == and != operators, strings may be used for both sides.
132#
133# BOX_TYPE := x86_32
134#
135# DEFAULTS IF ${BOX_TYPE} == x86_32
136# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-32
137# ELSE
138# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-64
139#
140# The DEFINED keyword can be used by the IF statements too.
141# It returns true if the given config variable or option has been defined
142# or false otherwise.
143#
144#
145# DEFAULTS IF DEFINED USE_CC
146# CC := ${USE_CC}
147# ELSE
148# CC := gcc
149#
150#
151# As well as NOT DEFINED.
152#
153# DEFAULTS IF NOT DEFINED MAKE_CMD
154# MAKE_CMD := make ARCH=x86
155#
156#
157# And/or ops (&&,||) may also be used to make complex conditionals.
158#
159# TEST_START IF (DEFINED ALL_TESTS || ${MYTEST} == boottest) && ${MACHINE} == gandalf
160#
161# Notice the use of parentheses. Without any parentheses the above would be
162# processed the same as:
163#
164# TEST_START IF DEFINED ALL_TESTS || (${MYTEST} == boottest && ${MACHINE} == gandalf)
165#
166#
167#
168# INCLUDE file
169#
170# The INCLUDE keyword may be used in DEFAULT sections. This will
171# read another config file and process that file as well. The included
172# file can include other files, add new test cases or default
173# statements. Config variables will be passed to these files and changes
174# to config variables will be seen by top level config files. Including
175# a file is processed just like the contents of the file was cut and pasted
176# into the top level file, except, that include files that end with
177# TEST_START sections will have that section ended at the end of
178# the include file. That is, an included file is included followed
179# by another DEFAULT keyword.
180#
181# Unlike other files referenced in this config, the file path does not need
182# to be absolute. If the file does not start with '/', then the directory
183# that the current config file was located in is used. If no config by the
184# given name is found there, then the current directory is searched.
185#
186# INCLUDE myfile
187# DEFAULT
188#
189# is the same as:
190#
191# INCLUDE myfile
192#
193# Note, if the include file does not contain a full path, the file is
194# searched first by the location of the original include file, and then
195# by the location that ktest.pl was executed in.
196#
75 197
76#### Config variables #### 198#### Config variables ####
77# 199#
@@ -253,9 +375,10 @@
253 375
254# The default test type (default test) 376# The default test type (default test)
255# The test types may be: 377# The test types may be:
256# build - only build the kernel, do nothing else 378# build - only build the kernel, do nothing else
257# boot - build and boot the kernel 379# install - build and install, but do nothing else (does not reboot)
258# test - build, boot and if TEST is set, run the test script 380# boot - build, install, and boot the kernel
381# test - build, boot and if TEST is set, run the test script
259# (If TEST is not set, it defaults back to boot) 382# (If TEST is not set, it defaults back to boot)
260# bisect - Perform a bisect on the kernel (see BISECT_TYPE below) 383# bisect - Perform a bisect on the kernel (see BISECT_TYPE below)
261# patchcheck - Do a test on a series of commits in git (see PATCHCHECK below) 384# patchcheck - Do a test on a series of commits in git (see PATCHCHECK below)
@@ -293,6 +416,13 @@
293# or on some systems: 416# or on some systems:
294#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION 417#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
295 418
419# If for some reason you just want to boot the kernel and you do not
420# want the test to install anything new. For example, you may just want
421# to boot test the same kernel over and over and do not want to go through
422# the hassle of installing anything, you can set this option to 1
423# (default 0)
424#NO_INSTALL = 1
425
296# If there is a script that you require to run before the build is done 426# If there is a script that you require to run before the build is done
297# you can specify it with PRE_BUILD. 427# you can specify it with PRE_BUILD.
298# 428#
@@ -415,6 +545,14 @@
415# (default "login:") 545# (default "login:")
416#SUCCESS_LINE = login: 546#SUCCESS_LINE = login:
417 547
548# To speed up reboots, define a line that the default
549# kernel produces which indicates that the default
550# kernel has successfully booted and can accept
551# a new test kernel. Otherwise ktest.pl will wait up to
552# SLEEP_TIME to continue.
553# (default undefined)
554#REBOOT_SUCCESS_LINE = login:
555
418# In case the console constantly fills the screen, having 556# In case the console constantly fills the screen, having
419# a specified time to stop the test after success is recommended. 557# a specified time to stop the test after success is recommended.
420# (in seconds) 558# (in seconds)
@@ -480,6 +618,8 @@
480# another test. If a reboot to the reliable kernel happens, 618# another test. If a reboot to the reliable kernel happens,
481# we wait SLEEP_TIME for the console to stop producing output 619# we wait SLEEP_TIME for the console to stop producing output
482# before starting the next test. 620# before starting the next test.
621#
622# You can speed up reboot times even more by setting REBOOT_SUCCESS_LINE.
483# (default 60) 623# (default 60)
484#SLEEP_TIME = 60 624#SLEEP_TIME = 60
485 625