aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/stable/sysfs-devices-node96
-rw-r--r--Documentation/ABI/testing/ima_policy3
-rw-r--r--Documentation/cgroups/memory.txt66
-rw-r--r--Documentation/cgroups/resource_counter.txt7
-rw-r--r--Documentation/devicetree/bindings/arm/davinci/nand.txt8
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt27
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt81
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-ocores.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt20
-rw-r--r--Documentation/devicetree/bindings/mtd/denali-nand.txt23
-rw-r--r--Documentation/devicetree/bindings/mtd/flctl-nand.txt49
-rw-r--r--Documentation/devicetree/bindings/mtd/fsmc-nand.txt12
-rw-r--r--Documentation/devicetree/bindings/mtd/m25p80.txt29
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd-physmap.txt3
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tiecap.txt23
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt23
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt31
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm.txt17
-rw-r--r--Documentation/devicetree/bindings/pwm/spear-pwm.txt18
-rw-r--r--Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt17
-rw-r--r--Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt17
-rw-r--r--Documentation/devicetree/bindings/pwm/vt8500-pwm.txt17
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/spi_atmel.txt26
-rw-r--r--Documentation/x86/boot.txt3
-rw-r--r--Documentation/xtensa/atomctl.txt44
-rw-r--r--Makefile6
-rw-r--r--arch/Kconfig19
-rw-r--r--arch/arm/boot/dts/imx28-cfa10049.dts24
-rw-r--r--arch/arm/boot/dts/spear13xx.dtsi14
-rw-r--r--arch/arm/boot/dts/spear300.dtsi8
-rw-r--r--arch/arm/boot/dts/spear310.dtsi8
-rw-r--r--arch/arm/boot/dts/spear320.dtsi8
-rw-r--r--arch/arm/boot/dts/spear600.dtsi8
-rw-r--r--arch/arm/configs/nhk8815_defconfig2
-rw-r--r--arch/arm/include/uapi/asm/unistd.h1
-rw-r--r--arch/arm/kernel/calls.S1
-rw-r--r--arch/arm/mach-nomadik/board-nhk8815.c71
-rw-r--r--arch/arm/mach-nomadik/include/mach/fsmc.h29
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c42
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c6
-rw-r--r--arch/arm/mach-omap2/i2c.c19
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2430_data.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_data.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c12
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c3
-rw-r--r--arch/arm/mach-u300/core.c14
-rw-r--r--arch/cris/include/asm/io.h39
-rw-r--r--arch/cris/kernel/module.c2
-rw-r--r--arch/h8300/Kconfig1
-rw-r--r--arch/parisc/kernel/module.c2
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/sparc/kernel/module.c4
-rw-r--r--arch/tile/include/asm/elf.h2
-rw-r--r--arch/tile/include/asm/ptrace.h3
-rw-r--r--arch/tile/include/uapi/asm/ptrace.h8
-rw-r--r--arch/tile/kernel/module.c2
-rw-r--r--arch/tile/kernel/pci.c4
-rw-r--r--arch/tile/kernel/pci_gx.c3
-rw-r--r--arch/tile/kernel/ptrace.c140
-rw-r--r--arch/unicore32/kernel/module.c3
-rw-r--r--arch/x86/kernel/cpu/proc.c7
-rw-r--r--arch/x86/kernel/irqinit.c40
-rw-r--r--arch/x86/kernel/traps.c6
-rw-r--r--arch/x86/platform/iris/iris.c67
-rw-r--r--arch/x86/syscalls/syscall_32.tbl1
-rw-r--r--arch/x86/syscalls/syscall_64.tbl1
-rw-r--r--arch/xtensa/Kconfig21
-rw-r--r--arch/xtensa/Kconfig.debug22
-rw-r--r--arch/xtensa/Makefile20
-rw-r--r--arch/xtensa/boot/Makefile25
-rw-r--r--arch/xtensa/boot/boot-elf/Makefile26
-rw-r--r--arch/xtensa/boot/boot-redboot/Makefile26
-rw-r--r--arch/xtensa/boot/boot-uboot/Makefile14
-rw-r--r--arch/xtensa/boot/dts/lx60.dts11
-rw-r--r--arch/xtensa/boot/dts/ml605.dts11
-rw-r--r--arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi26
-rw-r--r--arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi18
-rw-r--r--arch/xtensa/boot/dts/xtfpga.dtsi56
-rw-r--r--arch/xtensa/include/asm/atomic.h271
-rw-r--r--arch/xtensa/include/asm/barrier.h6
-rw-r--r--arch/xtensa/include/asm/bitops.h127
-rw-r--r--arch/xtensa/include/asm/bootparam.h20
-rw-r--r--arch/xtensa/include/asm/cacheasm.h1
-rw-r--r--arch/xtensa/include/asm/cacheflush.h3
-rw-r--r--arch/xtensa/include/asm/checksum.h19
-rw-r--r--arch/xtensa/include/asm/cmpxchg.h74
-rw-r--r--arch/xtensa/include/asm/current.h2
-rw-r--r--arch/xtensa/include/asm/delay.h7
-rw-r--r--arch/xtensa/include/asm/dma-mapping.h6
-rw-r--r--arch/xtensa/include/asm/elf.h10
-rw-r--r--arch/xtensa/include/asm/highmem.h1
-rw-r--r--arch/xtensa/include/asm/initialize_mmu.h55
-rw-r--r--arch/xtensa/include/asm/mmu_context.h2
-rw-r--r--arch/xtensa/include/asm/nommu_context.h2
-rw-r--r--arch/xtensa/include/asm/page.h20
-rw-r--r--arch/xtensa/include/asm/pci-bridge.h2
-rw-r--r--arch/xtensa/include/asm/pci.h2
-rw-r--r--arch/xtensa/include/asm/pgalloc.h2
-rw-r--r--arch/xtensa/include/asm/pgtable.h8
-rw-r--r--arch/xtensa/include/asm/platform.h1
-rw-r--r--arch/xtensa/include/asm/processor.h10
-rw-r--r--arch/xtensa/include/asm/prom.h6
-rw-r--r--arch/xtensa/include/asm/ptrace.h4
-rw-r--r--arch/xtensa/include/asm/regs.h5
-rw-r--r--arch/xtensa/include/asm/spinlock.h188
-rw-r--r--arch/xtensa/include/asm/syscall.h11
-rw-r--r--arch/xtensa/include/asm/traps.h23
-rw-r--r--arch/xtensa/include/asm/uaccess.h43
-rw-r--r--arch/xtensa/kernel/Makefile8
-rw-r--r--arch/xtensa/kernel/align.S4
-rw-r--r--arch/xtensa/kernel/asm-offsets.c5
-rw-r--r--arch/xtensa/kernel/coprocessor.S25
-rw-r--r--arch/xtensa/kernel/entry.S67
-rw-r--r--arch/xtensa/kernel/head.S21
-rw-r--r--arch/xtensa/kernel/irq.c132
-rw-r--r--arch/xtensa/kernel/module.c2
-rw-r--r--arch/xtensa/kernel/platform.c1
-rw-r--r--arch/xtensa/kernel/process.c2
-rw-r--r--arch/xtensa/kernel/ptrace.c3
-rw-r--r--arch/xtensa/kernel/setup.c279
-rw-r--r--arch/xtensa/kernel/signal.c8
-rw-r--r--arch/xtensa/kernel/syscall.c1
-rw-r--r--arch/xtensa/kernel/time.c7
-rw-r--r--arch/xtensa/kernel/traps.c18
-rw-r--r--arch/xtensa/kernel/vectors.S67
-rw-r--r--arch/xtensa/lib/checksum.S15
-rw-r--r--arch/xtensa/lib/memcopy.S6
-rw-r--r--arch/xtensa/lib/pci-auto.c9
-rw-r--r--arch/xtensa/lib/strncpy_user.S4
-rw-r--r--arch/xtensa/lib/strnlen_user.S1
-rw-r--r--arch/xtensa/lib/usercopy.S1
-rw-r--r--arch/xtensa/mm/cache.c27
-rw-r--r--arch/xtensa/mm/fault.c1
-rw-r--r--arch/xtensa/mm/init.c16
-rw-r--r--arch/xtensa/mm/misc.S51
-rw-r--r--arch/xtensa/mm/mmu.c2
-rw-r--r--arch/xtensa/mm/tlb.c9
-rw-r--r--arch/xtensa/platforms/iss/include/platform/serial.h15
-rw-r--r--arch/xtensa/platforms/iss/include/platform/simcall.h7
-rw-r--r--arch/xtensa/platforms/xtfpga/Makefile9
-rw-r--r--arch/xtensa/platforms/xtfpga/include/platform/hardware.h69
-rw-r--r--arch/xtensa/platforms/xtfpga/include/platform/lcd.h20
-rw-r--r--arch/xtensa/platforms/xtfpga/include/platform/serial.h18
-rw-r--r--arch/xtensa/platforms/xtfpga/lcd.c76
-rw-r--r--arch/xtensa/platforms/xtfpga/setup.c301
-rw-r--r--arch/xtensa/variants/s6000/gpio.c4
-rw-r--r--drivers/atm/solos-pci.c186
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c3
-rw-r--r--drivers/char/random.c40
-rw-r--r--drivers/clk/clk-nomadik.c1
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-ich.c1
-rw-r--r--drivers/gpio/gpio-mvebu.c17
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c2
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c2
-rw-r--r--drivers/i2c/busses/Kconfig10
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-at91.c338
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c300
-rw-r--r--drivers/i2c/busses/i2c-gpio.c6
-rw-r--r--drivers/i2c/busses/i2c-mxs.c2
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c14
-rw-r--r--drivers/i2c/busses/i2c-ocores.c164
-rw-r--r--drivers/i2c/busses/i2c-omap.c226
-rw-r--r--drivers/i2c/busses/i2c-rcar.c6
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c211
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c150
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c145
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h9
-rw-r--r--drivers/isdn/mISDN/dsp_core.c3
-rw-r--r--drivers/message/fusion/mptscsih.c1
-rw-r--r--drivers/mtd/ar7part.c7
-rw-r--r--drivers/mtd/bcm63xxpart.c32
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c16
-rw-r--r--drivers/mtd/cmdlinepart.c91
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c4
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/docg3.c2
-rw-r--r--drivers/mtd/devices/docprobe.c2
-rw-r--r--drivers/mtd/devices/m25p80.c48
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c14
-rw-r--r--drivers/mtd/devices/spear_smi.c23
-rw-r--r--drivers/mtd/devices/sst25l.c10
-rw-r--r--drivers/mtd/maps/Kconfig7
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/amd76xrom.c7
-rw-r--r--drivers/mtd/maps/autcpu12-nvram.c6
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c9
-rw-r--r--drivers/mtd/maps/ck804xrom.c6
-rw-r--r--drivers/mtd/maps/esb2rom.c6
-rw-r--r--drivers/mtd/maps/fortunet.c277
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c12
-rw-r--r--drivers/mtd/maps/ichxrom.c8
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c18
-rw-r--r--drivers/mtd/maps/lantiq-flash.c8
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c4
-rw-r--r--drivers/mtd/maps/pci.c6
-rw-r--r--drivers/mtd/maps/physmap_of.c19
-rw-r--r--drivers/mtd/maps/pismo.c18
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c6
-rw-r--r--drivers/mtd/maps/sa1100-flash.c6
-rw-r--r--drivers/mtd/maps/scb2_flash.c8
-rw-r--r--drivers/mtd/maps/sun_uflash.c6
-rw-r--r--drivers/mtd/maps/vmu-flash.c10
-rw-r--r--drivers/mtd/mtd_blkdevs.c51
-rw-r--r--drivers/mtd/mtdoops.c15
-rw-r--r--drivers/mtd/nand/Kconfig34
-rw-r--r--drivers/mtd/nand/Makefile4
-rw-r--r--drivers/mtd/nand/ams-delta.c6
-rw-r--r--drivers/mtd/nand/atmel_nand.c28
-rw-r--r--drivers/mtd/nand/au1550nd.c8
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/Makefile4
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h22
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/main.c108
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c413
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c8
-rw-r--r--drivers/mtd/nand/cafe_nand.c12
-rw-r--r--drivers/mtd/nand/cs553x_nand.c3
-rw-r--r--drivers/mtd/nand/davinci_nand.c13
-rw-r--r--drivers/mtd/nand/denali.c162
-rw-r--r--drivers/mtd/nand/denali.h5
-rw-r--r--drivers/mtd/nand/denali_dt.c167
-rw-r--r--drivers/mtd/nand/denali_pci.c144
-rw-r--r--drivers/mtd/nand/diskonchip.c2
-rw-r--r--drivers/mtd/nand/docg4.c73
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c17
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c6
-rw-r--r--drivers/mtd/nand/fsl_upm.c8
-rw-r--r--drivers/mtd/nand/fsmc_nand.c106
-rw-r--r--drivers/mtd/nand/gpio.c34
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c10
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c41
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h1
-rw-r--r--drivers/mtd/nand/jz4740_nand.c14
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c6
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c6
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c8
-rw-r--r--drivers/mtd/nand/mxc_nand.c12
-rw-r--r--drivers/mtd/nand/nand_base.c114
-rw-r--r--drivers/mtd/nand/nandsim.c186
-rw-r--r--drivers/mtd/nand/ndfc.c6
-rw-r--r--drivers/mtd/nand/nomadik_nand.c235
-rw-r--r--drivers/mtd/nand/nuc900_nand.c6
-rw-r--r--drivers/mtd/nand/omap2.c2
-rw-r--r--drivers/mtd/nand/orion_nand.c4
-rw-r--r--drivers/mtd/nand/pasemi_nand.c4
-rw-r--r--drivers/mtd/nand/plat_nand.c6
-rw-r--r--drivers/mtd/nand/s3c2410.c7
-rw-r--r--drivers/mtd/nand/sh_flctl.c306
-rw-r--r--drivers/mtd/nand/sharpsl.c6
-rw-r--r--drivers/mtd/nand/socrates_nand.c6
-rw-r--r--drivers/mtd/ofpart.c5
-rw-r--r--drivers/mtd/onenand/generic.c6
-rw-r--r--drivers/mtd/onenand/omap2.c6
-rw-r--r--drivers/mtd/onenand/samsung.c4
-rw-r--r--drivers/mtd/tests/mtd_nandbiterrs.c73
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c6
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c171
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c152
-rw-r--r--drivers/mtd/tests/mtd_readtest.c44
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c88
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c44
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c124
-rw-r--r--drivers/mtd/tests/mtd_torturetest.c73
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c5
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c59
-rw-r--r--drivers/net/ethernet/freescale/Kconfig3
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c3
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c18
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c22
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/tun.c87
-rw-r--r--drivers/net/usb/cdc_ether.c45
-rw-r--r--drivers/net/usb/cdc_ncm.c10
-rw-r--r--drivers/net/usb/qmi_wwan.c15
-rw-r--r--drivers/net/usb/usbnet.c25
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h3
-rw-r--r--drivers/net/wimax/i2400m/usb.c6
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c8
-rw-r--r--drivers/of/base.c2
-rw-r--r--drivers/power/charger-manager.c38
-rw-r--r--drivers/pwm/Kconfig39
-rw-r--r--drivers/pwm/Makefile5
-rw-r--r--drivers/pwm/core.c29
-rw-r--r--drivers/pwm/pwm-imx.c2
-rw-r--r--drivers/pwm/pwm-lpc32xx.c23
-rw-r--r--drivers/pwm/pwm-samsung.c1
-rw-r--r--drivers/pwm/pwm-spear.c276
-rw-r--r--drivers/pwm/pwm-tiecap.c48
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c62
-rw-r--r--drivers/pwm/pwm-tipwmss.c139
-rw-r--r--drivers/pwm/pwm-tipwmss.h39
-rw-r--r--drivers/pwm/pwm-twl-led.c344
-rw-r--r--drivers/pwm/pwm-twl.c359
-rw-r--r--drivers/pwm/pwm-twl6030.c184
-rw-r--r--drivers/pwm/pwm-vt8500.c98
-rw-r--r--drivers/spi/spi-atmel.c17
-rw-r--r--drivers/spi/spi-s3c64xx.c10
-rw-r--r--drivers/spi/spi-sh-hspi.c2
-rw-r--r--drivers/spi/spi.c5
-rw-r--r--drivers/video/backlight/locomolcd.c38
-rw-r--r--fs/ceph/export.c4
-rw-r--r--fs/jffs2/nodemgmt.c6
-rw-r--r--include/linux/asn1.h2
-rw-r--r--include/linux/bcma/bcma.h1
-rw-r--r--include/linux/blkdev.h19
-rw-r--r--include/linux/compiler-gcc4.h12
-rw-r--r--include/linux/compiler-intel.h7
-rw-r--r--include/linux/compiler.h9
-rw-r--r--include/linux/gfp.h5
-rw-r--r--include/linux/hugetlb_cgroup.h5
-rw-r--r--include/linux/i2c-omap.h2
-rw-r--r--include/linux/i2c/i2c-sh_mobile.h1
-rw-r--r--include/linux/ima.h6
-rw-r--r--include/linux/memcontrol.h209
-rw-r--r--include/linux/moduleparam.h6
-rw-r--r--include/linux/mtd/blktrans.h4
-rw-r--r--include/linux/mtd/doc2000.h22
-rw-r--r--include/linux/mtd/fsmc.h3
-rw-r--r--include/linux/mtd/gpmi-nand.h68
-rw-r--r--include/linux/mtd/map.h4
-rw-r--r--include/linux/mtd/mtd.h2
-rw-r--r--include/linux/mtd/nand.h11
-rw-r--r--include/linux/mtd/sh_flctl.h14
-rw-r--r--include/linux/of_platform.h1
-rw-r--r--include/linux/platform_data/i2c-cbus-gpio.h27
-rw-r--r--include/linux/platform_data/mtd-nomadik-nand.h16
-rw-r--r--include/linux/pwm.h3
-rw-r--r--include/linux/res_counter.h12
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/security.h13
-rw-r--r--include/linux/slab.h48
-rw-r--r--include/linux/slab_def.h3
-rw-r--r--include/linux/slub_def.h9
-rw-r--r--include/linux/syscalls.h1
-rw-r--r--include/linux/thread_info.h2
-rw-r--r--include/linux/usb/usbnet.h3
-rw-r--r--include/net/inet_connection_sock.h1
-rw-r--r--include/net/ndisc.h7
-rw-r--r--include/trace/events/gfpflags.h1
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/linux/if_bridge.h3
-rw-r--r--include/uapi/linux/module.h8
-rw-r--r--include/uapi/linux/swab.h12
-rw-r--r--init/Kconfig2
-rw-r--r--kernel/Makefile10
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/irq/manage.c2
-rw-r--r--kernel/modsign_certificate.S19
-rw-r--r--kernel/modsign_pubkey.c6
-rw-r--r--kernel/module.c441
-rw-r--r--kernel/posix-cpu-timers.c3
-rw-r--r--kernel/res_counter.c20
-rw-r--r--kernel/sys_ni.c1
-rw-r--r--kernel/watchdog.c11
-rw-r--r--lib/asn1_decoder.c8
-rw-r--r--mm/Kconfig13
-rw-r--r--mm/hugetlb.c11
-rw-r--r--mm/hugetlb_cgroup.c19
-rw-r--r--mm/kmemleak.c3
-rw-r--r--mm/memcontrol.c1242
-rw-r--r--mm/memory_hotplug.c18
-rw-r--r--mm/mprotect.c30
-rw-r--r--mm/page_alloc.c38
-rw-r--r--mm/slab.c94
-rw-r--r--mm/slab.h137
-rw-r--r--mm/slab_common.c118
-rw-r--r--mm/slob.c2
-rw-r--r--mm/slub.c150
-rw-r--r--mm/vmscan.c14
-rw-r--r--net/atm/atm_sysfs.c40
-rw-r--r--net/bridge/br_mdb.c22
-rw-r--r--net/bridge/br_multicast.c13
-rw-r--r--net/bridge/br_netlink.c1
-rw-r--r--net/bridge/br_private.h5
-rw-r--r--net/dccp/ipv4.c4
-rw-r--r--net/dccp/ipv6.c3
-rw-r--r--net/ipv4/inet_connection_sock.c16
-rw-r--r--net/ipv4/tcp_ipv4.c6
-rw-r--r--net/ipv6/Makefile2
-rw-r--r--net/ipv6/addrconf.c3
-rw-r--r--net/ipv6/ndisc.c17
-rw-r--r--net/ipv6/tcp_ipv6.c3
-rw-r--r--net/mac802154/ieee802154_dev.c4
-rw-r--r--net/netlink/af_netlink.c5
-rw-r--r--net/sctp/Kconfig27
-rw-r--r--net/sctp/probe.c3
-rw-r--r--net/sctp/protocol.c4
-rw-r--r--scripts/Makefile.modsign32
-rw-r--r--scripts/coccinelle/api/d_find_alias.cocci80
-rw-r--r--security/capability.c6
-rw-r--r--security/integrity/ima/ima.h2
-rw-r--r--security/integrity/ima/ima_api.c4
-rw-r--r--security/integrity/ima/ima_main.c21
-rw-r--r--security/integrity/ima/ima_policy.c3
-rw-r--r--security/security.c10
-rw-r--r--security/selinux/nlmsgtab.c2
413 files changed, 11456 insertions, 3618 deletions
diff --git a/Documentation/ABI/stable/sysfs-devices-node b/Documentation/ABI/stable/sysfs-devices-node
index 49b82cad7003..ce259c13c36a 100644
--- a/Documentation/ABI/stable/sysfs-devices-node
+++ b/Documentation/ABI/stable/sysfs-devices-node
@@ -1,7 +1,101 @@
1What: /sys/devices/system/node/possible
2Date: October 2002
3Contact: Linux Memory Management list <linux-mm@kvack.org>
4Description:
5 Nodes that could be possibly become online at some point.
6
7What: /sys/devices/system/node/online
8Date: October 2002
9Contact: Linux Memory Management list <linux-mm@kvack.org>
10Description:
11 Nodes that are online.
12
13What: /sys/devices/system/node/has_normal_memory
14Date: October 2002
15Contact: Linux Memory Management list <linux-mm@kvack.org>
16Description:
17 Nodes that have regular memory.
18
19What: /sys/devices/system/node/has_cpu
20Date: October 2002
21Contact: Linux Memory Management list <linux-mm@kvack.org>
22Description:
23 Nodes that have one or more CPUs.
24
25What: /sys/devices/system/node/has_high_memory
26Date: October 2002
27Contact: Linux Memory Management list <linux-mm@kvack.org>
28Description:
29 Nodes that have regular or high memory.
30 Depends on CONFIG_HIGHMEM.
31
1What: /sys/devices/system/node/nodeX 32What: /sys/devices/system/node/nodeX
2Date: October 2002 33Date: October 2002
3Contact: Linux Memory Management list <linux-mm@kvack.org> 34Contact: Linux Memory Management list <linux-mm@kvack.org>
4Description: 35Description:
5 When CONFIG_NUMA is enabled, this is a directory containing 36 When CONFIG_NUMA is enabled, this is a directory containing
6 information on node X such as what CPUs are local to the 37 information on node X such as what CPUs are local to the
7 node. 38 node. Each file is detailed next.
39
40What: /sys/devices/system/node/nodeX/cpumap
41Date: October 2002
42Contact: Linux Memory Management list <linux-mm@kvack.org>
43Description:
44 The node's cpumap.
45
46What: /sys/devices/system/node/nodeX/cpulist
47Date: October 2002
48Contact: Linux Memory Management list <linux-mm@kvack.org>
49Description:
50 The CPUs associated to the node.
51
52What: /sys/devices/system/node/nodeX/meminfo
53Date: October 2002
54Contact: Linux Memory Management list <linux-mm@kvack.org>
55Description:
56 Provides information about the node's distribution and memory
57 utilization. Similar to /proc/meminfo, see Documentation/filesystems/proc.txt
58
59What: /sys/devices/system/node/nodeX/numastat
60Date: October 2002
61Contact: Linux Memory Management list <linux-mm@kvack.org>
62Description:
63 The node's hit/miss statistics, in units of pages.
64 See Documentation/numastat.txt
65
66What: /sys/devices/system/node/nodeX/distance
67Date: October 2002
68Contact: Linux Memory Management list <linux-mm@kvack.org>
69Description:
70 Distance between the node and all the other nodes
71 in the system.
72
73What: /sys/devices/system/node/nodeX/vmstat
74Date: October 2002
75Contact: Linux Memory Management list <linux-mm@kvack.org>
76Description:
77 The node's zoned virtual memory statistics.
78 This is a superset of numastat.
79
80What: /sys/devices/system/node/nodeX/compact
81Date: February 2010
82Contact: Mel Gorman <mel@csn.ul.ie>
83Description:
84 When this file is written to, all memory within that node
85 will be compacted. When it completes, memory will be freed
86 into blocks which have as many contiguous pages as possible
87
88What: /sys/devices/system/node/nodeX/scan_unevictable_pages
89Date: October 2008
90Contact: Lee Schermerhorn <lee.schermerhorn@hp.com>
91Description:
92 When set, it triggers scanning the node's unevictable lists
93 and move any pages that have become evictable onto the respective
94 zone's inactive list. See mm/vmscan.c
95
96What: /sys/devices/system/node/nodeX/hugepages/hugepages-<size>/
97Date: December 2009
98Contact: Lee Schermerhorn <lee.schermerhorn@hp.com>
99Description:
100 The node's huge page size control/query attributes.
101 See Documentation/vm/hugetlbpage.txt \ No newline at end of file
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index 986946613542..ec0a38ef3145 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -23,7 +23,7 @@ Description:
23 lsm: [[subj_user=] [subj_role=] [subj_type=] 23 lsm: [[subj_user=] [subj_role=] [subj_type=]
24 [obj_user=] [obj_role=] [obj_type=]] 24 [obj_user=] [obj_role=] [obj_type=]]
25 25
26 base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK] 26 base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK][MODULE_CHECK]
27 mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC] 27 mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
28 fsmagic:= hex value 28 fsmagic:= hex value
29 uid:= decimal value 29 uid:= decimal value
@@ -53,6 +53,7 @@ Description:
53 measure func=BPRM_CHECK 53 measure func=BPRM_CHECK
54 measure func=FILE_MMAP mask=MAY_EXEC 54 measure func=FILE_MMAP mask=MAY_EXEC
55 measure func=FILE_CHECK mask=MAY_READ uid=0 55 measure func=FILE_CHECK mask=MAY_READ uid=0
56 measure func=MODULE_CHECK uid=0
56 appraise fowner=0 57 appraise fowner=0
57 58
58 The default policy measures all executables in bprm_check, 59 The default policy measures all executables in bprm_check,
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index a25cb3fafeba..8b8c28b9864c 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -71,6 +71,11 @@ Brief summary of control files.
71 memory.oom_control # set/show oom controls. 71 memory.oom_control # set/show oom controls.
72 memory.numa_stat # show the number of memory usage per numa node 72 memory.numa_stat # show the number of memory usage per numa node
73 73
74 memory.kmem.limit_in_bytes # set/show hard limit for kernel memory
75 memory.kmem.usage_in_bytes # show current kernel memory allocation
76 memory.kmem.failcnt # show the number of kernel memory usage hits limits
77 memory.kmem.max_usage_in_bytes # show max kernel memory usage recorded
78
74 memory.kmem.tcp.limit_in_bytes # set/show hard limit for tcp buf memory 79 memory.kmem.tcp.limit_in_bytes # set/show hard limit for tcp buf memory
75 memory.kmem.tcp.usage_in_bytes # show current tcp buf memory allocation 80 memory.kmem.tcp.usage_in_bytes # show current tcp buf memory allocation
76 memory.kmem.tcp.failcnt # show the number of tcp buf memory usage hits limits 81 memory.kmem.tcp.failcnt # show the number of tcp buf memory usage hits limits
@@ -268,20 +273,73 @@ the amount of kernel memory used by the system. Kernel memory is fundamentally
268different than user memory, since it can't be swapped out, which makes it 273different than user memory, since it can't be swapped out, which makes it
269possible to DoS the system by consuming too much of this precious resource. 274possible to DoS the system by consuming too much of this precious resource.
270 275
276Kernel memory won't be accounted at all until limit on a group is set. This
277allows for existing setups to continue working without disruption. The limit
278cannot be set if the cgroup have children, or if there are already tasks in the
279cgroup. Attempting to set the limit under those conditions will return -EBUSY.
280When use_hierarchy == 1 and a group is accounted, its children will
281automatically be accounted regardless of their limit value.
282
283After a group is first limited, it will be kept being accounted until it
284is removed. The memory limitation itself, can of course be removed by writing
285-1 to memory.kmem.limit_in_bytes. In this case, kmem will be accounted, but not
286limited.
287
271Kernel memory limits are not imposed for the root cgroup. Usage for the root 288Kernel memory limits are not imposed for the root cgroup. Usage for the root
272cgroup may or may not be accounted. 289cgroup may or may not be accounted. The memory used is accumulated into
290memory.kmem.usage_in_bytes, or in a separate counter when it makes sense.
291(currently only for tcp).
292The main "kmem" counter is fed into the main counter, so kmem charges will
293also be visible from the user counter.
273 294
274Currently no soft limit is implemented for kernel memory. It is future work 295Currently no soft limit is implemented for kernel memory. It is future work
275to trigger slab reclaim when those limits are reached. 296to trigger slab reclaim when those limits are reached.
276 297
2772.7.1 Current Kernel Memory resources accounted 2982.7.1 Current Kernel Memory resources accounted
278 299
300* stack pages: every process consumes some stack pages. By accounting into
301kernel memory, we prevent new processes from being created when the kernel
302memory usage is too high.
303
304* slab pages: pages allocated by the SLAB or SLUB allocator are tracked. A copy
305of each kmem_cache is created everytime the cache is touched by the first time
306from inside the memcg. The creation is done lazily, so some objects can still be
307skipped while the cache is being created. All objects in a slab page should
308belong to the same memcg. This only fails to hold when a task is migrated to a
309different memcg during the page allocation by the cache.
310
279* sockets memory pressure: some sockets protocols have memory pressure 311* sockets memory pressure: some sockets protocols have memory pressure
280thresholds. The Memory Controller allows them to be controlled individually 312thresholds. The Memory Controller allows them to be controlled individually
281per cgroup, instead of globally. 313per cgroup, instead of globally.
282 314
283* tcp memory pressure: sockets memory pressure for the tcp protocol. 315* tcp memory pressure: sockets memory pressure for the tcp protocol.
284 316
3172.7.3 Common use cases
318
319Because the "kmem" counter is fed to the main user counter, kernel memory can
320never be limited completely independently of user memory. Say "U" is the user
321limit, and "K" the kernel limit. There are three possible ways limits can be
322set:
323
324 U != 0, K = unlimited:
325 This is the standard memcg limitation mechanism already present before kmem
326 accounting. Kernel memory is completely ignored.
327
328 U != 0, K < U:
329 Kernel memory is a subset of the user memory. This setup is useful in
330 deployments where the total amount of memory per-cgroup is overcommited.
331 Overcommiting kernel memory limits is definitely not recommended, since the
332 box can still run out of non-reclaimable memory.
333 In this case, the admin could set up K so that the sum of all groups is
334 never greater than the total memory, and freely set U at the cost of his
335 QoS.
336
337 U != 0, K >= U:
338 Since kmem charges will also be fed to the user counter and reclaim will be
339 triggered for the cgroup for both kinds of memory. This setup gives the
340 admin a unified view of memory, and it is also useful for people who just
341 want to track kernel memory usage.
342
2853. User Interface 3433. User Interface
286 344
2870. Configuration 3450. Configuration
@@ -290,6 +348,7 @@ a. Enable CONFIG_CGROUPS
290b. Enable CONFIG_RESOURCE_COUNTERS 348b. Enable CONFIG_RESOURCE_COUNTERS
291c. Enable CONFIG_MEMCG 349c. Enable CONFIG_MEMCG
292d. Enable CONFIG_MEMCG_SWAP (to use swap extension) 350d. Enable CONFIG_MEMCG_SWAP (to use swap extension)
351d. Enable CONFIG_MEMCG_KMEM (to use kmem extension)
293 352
2941. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?) 3531. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?)
295# mount -t tmpfs none /sys/fs/cgroup 354# mount -t tmpfs none /sys/fs/cgroup
@@ -406,6 +465,11 @@ About use_hierarchy, see Section 6.
406 Because rmdir() moves all pages to parent, some out-of-use page caches can be 465 Because rmdir() moves all pages to parent, some out-of-use page caches can be
407 moved to the parent. If you want to avoid that, force_empty will be useful. 466 moved to the parent. If you want to avoid that, force_empty will be useful.
408 467
468 Also, note that when memory.kmem.limit_in_bytes is set the charges due to
469 kernel pages will still be seen. This is not considered a failure and the
470 write will still return success. In this case, it is expected that
471 memory.kmem.usage_in_bytes == memory.usage_in_bytes.
472
409 About use_hierarchy, see Section 6. 473 About use_hierarchy, see Section 6.
410 474
4115.2 stat file 4755.2 stat file
diff --git a/Documentation/cgroups/resource_counter.txt b/Documentation/cgroups/resource_counter.txt
index 0c4a344e78fa..c4d99ed0b418 100644
--- a/Documentation/cgroups/resource_counter.txt
+++ b/Documentation/cgroups/resource_counter.txt
@@ -83,16 +83,17 @@ to work with it.
83 res_counter->lock internally (it must be called with res_counter->lock 83 res_counter->lock internally (it must be called with res_counter->lock
84 held). The force parameter indicates whether we can bypass the limit. 84 held). The force parameter indicates whether we can bypass the limit.
85 85
86 e. void res_counter_uncharge[_locked] 86 e. u64 res_counter_uncharge[_locked]
87 (struct res_counter *rc, unsigned long val) 87 (struct res_counter *rc, unsigned long val)
88 88
89 When a resource is released (freed) it should be de-accounted 89 When a resource is released (freed) it should be de-accounted
90 from the resource counter it was accounted to. This is called 90 from the resource counter it was accounted to. This is called
91 "uncharging". 91 "uncharging". The return value of this function indicate the amount
92 of charges still present in the counter.
92 93
93 The _locked routines imply that the res_counter->lock is taken. 94 The _locked routines imply that the res_counter->lock is taken.
94 95
95 f. void res_counter_uncharge_until 96 f. u64 res_counter_uncharge_until
96 (struct res_counter *rc, struct res_counter *top, 97 (struct res_counter *rc, struct res_counter *top,
97 unsinged long val) 98 unsinged long val)
98 99
diff --git a/Documentation/devicetree/bindings/arm/davinci/nand.txt b/Documentation/devicetree/bindings/arm/davinci/nand.txt
index 49fc7ada929a..3545ea704b50 100644
--- a/Documentation/devicetree/bindings/arm/davinci/nand.txt
+++ b/Documentation/devicetree/bindings/arm/davinci/nand.txt
@@ -23,6 +23,9 @@ Recommended properties :
23- ti,davinci-nand-buswidth: buswidth 8 or 16 23- ti,davinci-nand-buswidth: buswidth 8 or 16
24- ti,davinci-nand-use-bbt: use flash based bad block table support. 24- ti,davinci-nand-use-bbt: use flash based bad block table support.
25 25
26nand device bindings may contain additional sub-nodes describing
27partitions of the address space. See partition.txt for more detail.
28
26Example(da850 EVM ): 29Example(da850 EVM ):
27nand_cs3@62000000 { 30nand_cs3@62000000 {
28 compatible = "ti,davinci-nand"; 31 compatible = "ti,davinci-nand";
@@ -35,4 +38,9 @@ nand_cs3@62000000 {
35 ti,davinci-ecc-mode = "hw"; 38 ti,davinci-ecc-mode = "hw";
36 ti,davinci-ecc-bits = <4>; 39 ti,davinci-ecc-bits = <4>;
37 ti,davinci-nand-use-bbt; 40 ti,davinci-nand-use-bbt;
41
42 partition@180000 {
43 label = "ubifs";
44 reg = <0x180000 0x7e80000>;
45 };
38}; 46};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt b/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt
new file mode 100644
index 000000000000..8ce9cd2855b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt
@@ -0,0 +1,27 @@
1Device tree bindings for i2c-cbus-gpio driver
2
3Required properties:
4 - compatible = "i2c-cbus-gpio";
5 - gpios: clk, dat, sel
6 - #address-cells = <1>;
7 - #size-cells = <0>;
8
9Optional properties:
10 - child nodes conforming to i2c bus binding
11
12Example:
13
14i2c@0 {
15 compatible = "i2c-cbus-gpio";
16 gpios = <&gpio 66 0 /* clk */
17 &gpio 65 0 /* dat */
18 &gpio 64 0 /* sel */
19 >;
20 #address-cells = <1>;
21 #size-cells = <0>;
22
23 retu-mfd: retu@1 {
24 compatible = "retu-mfd";
25 reg = <0x1>;
26 };
27};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
new file mode 100644
index 000000000000..66709a825541
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
@@ -0,0 +1,81 @@
1GPIO-based I2C Bus Mux
2
3This binding describes an I2C bus multiplexer that uses GPIOs to
4route the I2C signals.
5
6 +-----+ +-----+
7 | dev | | dev |
8 +------------+ +-----+ +-----+
9 | SoC | | |
10 | | /--------+--------+
11 | +------+ | +------+ child bus A, on GPIO value set to 0
12 | | I2C |-|--| Mux |
13 | +------+ | +--+---+ child bus B, on GPIO value set to 1
14 | | | \----------+--------+--------+
15 | +------+ | | | | |
16 | | GPIO |-|-----+ +-----+ +-----+ +-----+
17 | +------+ | | dev | | dev | | dev |
18 +------------+ +-----+ +-----+ +-----+
19
20Required properties:
21- compatible: i2c-mux-gpio
22- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
23 port is connected to.
24- mux-gpios: list of gpios used to control the muxer
25* Standard I2C mux properties. See mux.txt in this directory.
26* I2C child bus nodes. See mux.txt in this directory.
27
28Optional properties:
29- idle-state: value to set the muxer to when idle. When no value is
30 given, it defaults to the last value used.
31
32For each i2c child node, an I2C child bus will be created. They will
33be numbered based on their order in the device tree.
34
35Whenever an access is made to a device on a child bus, the value set
 36in the relevant node's reg property will be output using the list of
37GPIOs, the first in the list holding the least-significant value.
38
39If an idle state is defined, using the idle-state (optional) property,
40whenever an access is not being made to a device on a child bus, the
41GPIOs will be set according to the idle value.
42
43If an idle state is not defined, the most recently used value will be
44left programmed into hardware whenever no access is being made to a
45device on a child bus.
46
47Example:
48 i2cmux {
49 compatible = "i2c-mux-gpio";
50 #address-cells = <1>;
51 #size-cells = <0>;
52 mux-gpios = <&gpio1 22 0 &gpio1 23 0>;
53 i2c-parent = <&i2c1>;
54
55 i2c@1 {
56 reg = <1>;
57 #address-cells = <1>;
58 #size-cells = <0>;
59
60 ssd1307: oled@3c {
61 compatible = "solomon,ssd1307fb-i2c";
62 reg = <0x3c>;
63 pwms = <&pwm 4 3000>;
64 reset-gpios = <&gpio2 7 1>;
65 reset-active-low;
66 };
67 };
68
69 i2c@3 {
70 reg = <3>;
71 #address-cells = <1>;
72 #size-cells = <0>;
73
74 pca9555: pca9555@20 {
75 compatible = "nxp,pca9555";
76 gpio-controller;
77 #gpio-cells = <2>;
78 reg = <0x20>;
79 };
80 };
81 };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-ocores.txt b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt
index c15781f4dc8c..1637c298a1b3 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-ocores.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt
@@ -1,7 +1,7 @@
1Device tree configuration for i2c-ocores 1Device tree configuration for i2c-ocores
2 2
3Required properties: 3Required properties:
4- compatible : "opencores,i2c-ocores" 4- compatible : "opencores,i2c-ocores" or "aeroflexgaisler,i2cmst"
5- reg : bus address start and address range size of device 5- reg : bus address start and address range size of device
6- interrupts : interrupt number 6- interrupts : interrupt number
7- clock-frequency : frequency of bus clock in Hz 7- clock-frequency : frequency of bus clock in Hz
diff --git a/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt b/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt
index b6cb5a12c672..e9611ace8792 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt
@@ -13,11 +13,17 @@ Required properties:
13 - interrupts: interrupt number to the cpu. 13 - interrupts: interrupt number to the cpu.
14 - samsung,i2c-sda-delay: Delay (in ns) applied to data line (SDA) edges. 14 - samsung,i2c-sda-delay: Delay (in ns) applied to data line (SDA) edges.
15 15
16Required for all cases except "samsung,s3c2440-hdmiphy-i2c":
17 - Samsung GPIO variant (deprecated):
18 - gpios: The order of the gpios should be the following: <SDA, SCL>.
19 The gpio specifier depends on the gpio controller. Required in all
20 cases except for "samsung,s3c2440-hdmiphy-i2c" whose input/output
 21 lines are permanently wired to the respective client
22 - Pinctrl variant (preferred, if available):
23 - pinctrl-0: Pin control group to be used for this controller.
24 - pinctrl-names: Should contain only one value - "default".
25
16Optional properties: 26Optional properties:
17 - gpios: The order of the gpios should be the following: <SDA, SCL>.
18 The gpio specifier depends on the gpio controller. Required in all
19 cases except for "samsung,s3c2440-hdmiphy-i2c" whose input/output
20 lines are permanently wired to the respective client
 21 - samsung,i2c-slave-addr: Slave address in multi-master environment. If not 27 - samsung,i2c-slave-addr: Slave address in multi-master environment. If not
22 specified, default value is 0. 28 specified, default value is 0.
23 - samsung,i2c-max-bus-freq: Desired frequency in Hz of the bus. If not 29 - samsung,i2c-max-bus-freq: Desired frequency in Hz of the bus. If not
@@ -31,8 +37,14 @@ Example:
31 interrupts = <345>; 37 interrupts = <345>;
32 samsung,i2c-sda-delay = <100>; 38 samsung,i2c-sda-delay = <100>;
33 samsung,i2c-max-bus-freq = <100000>; 39 samsung,i2c-max-bus-freq = <100000>;
40 /* Samsung GPIO variant begins here */
34 gpios = <&gpd1 2 0 /* SDA */ 41 gpios = <&gpd1 2 0 /* SDA */
35 &gpd1 3 0 /* SCL */>; 42 &gpd1 3 0 /* SCL */>;
43 /* Samsung GPIO variant ends here */
44 /* Pinctrl variant begins here */
45 pinctrl-0 = <&i2c3_bus>;
46 pinctrl-names = "default";
47 /* Pinctrl variant ends here */
36 #address-cells = <1>; 48 #address-cells = <1>;
37 #size-cells = <0>; 49 #size-cells = <0>;
38 50
diff --git a/Documentation/devicetree/bindings/mtd/denali-nand.txt b/Documentation/devicetree/bindings/mtd/denali-nand.txt
new file mode 100644
index 000000000000..b04d03a1d499
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/denali-nand.txt
@@ -0,0 +1,23 @@
1* Denali NAND controller
2
3Required properties:
4 - compatible : should be "denali,denali-nand-dt"
5 - reg : should contain registers location and length for data and reg.
6 - reg-names: Should contain the reg names "nand_data" and "denali_reg"
7 - interrupts : The interrupt number.
 8 - dma-mask : DMA bit mask
9
10The device tree may optionally contain sub-nodes describing partitions of the
11address space. See partition.txt for more detail.
12
13Examples:
14
15nand: nand@ff900000 {
16 #address-cells = <1>;
17 #size-cells = <1>;
18 compatible = "denali,denali-nand-dt";
19 reg = <0xff900000 0x100000>, <0xffb80000 0x10000>;
20 reg-names = "nand_data", "denali_reg";
21 interrupts = <0 144 4>;
22 dma-mask = <0xffffffff>;
23};
diff --git a/Documentation/devicetree/bindings/mtd/flctl-nand.txt b/Documentation/devicetree/bindings/mtd/flctl-nand.txt
new file mode 100644
index 000000000000..427f46dc60ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/flctl-nand.txt
@@ -0,0 +1,49 @@
1FLCTL NAND controller
2
3Required properties:
4- compatible : "renesas,shmobile-flctl-sh7372"
5- reg : Address range of the FLCTL
6- interrupts : flste IRQ number
7- nand-bus-width : bus width to NAND chip
8
9Optional properties:
10- dmas: DMA specifier(s)
11- dma-names: name for each DMA specifier. Valid names are
12 "data_tx", "data_rx", "ecc_tx", "ecc_rx"
13
14The DMA fields are not used yet in the driver but are listed here for
15completing the bindings.
16
17The device tree may optionally contain sub-nodes describing partitions of the
18address space. See partition.txt for more detail.
19
20Example:
21
22 flctl@e6a30000 {
23 #address-cells = <1>;
24 #size-cells = <1>;
25 compatible = "renesas,shmobile-flctl-sh7372";
26 reg = <0xe6a30000 0x100>;
27 interrupts = <0x0d80>;
28
29 nand-bus-width = <16>;
30
31 dmas = <&dmac 1 /* data_tx */
 32 &dmac 2>; /* data_rx */
33 dma-names = "data_tx", "data_rx";
34
35 system@0 {
36 label = "system";
37 reg = <0x0 0x8000000>;
38 };
39
40 userdata@8000000 {
41 label = "userdata";
42 reg = <0x8000000 0x10000000>;
43 };
44
45 cache@18000000 {
46 label = "cache";
47 reg = <0x18000000 0x8000000>;
48 };
49 };
diff --git a/Documentation/devicetree/bindings/mtd/fsmc-nand.txt b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
index e2c663b354d2..e3ea32e7de3e 100644
--- a/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
@@ -3,9 +3,7 @@
3Required properties: 3Required properties:
4- compatible : "st,spear600-fsmc-nand" 4- compatible : "st,spear600-fsmc-nand"
5- reg : Address range of the mtd chip 5- reg : Address range of the mtd chip
6- reg-names: Should contain the reg names "fsmc_regs" and "nand_data" 6- reg-names: Should contain the reg names "fsmc_regs", "nand_data", "nand_addr" and "nand_cmd"
7- st,ale-off : Chip specific offset to ALE
8- st,cle-off : Chip specific offset to CLE
9 7
10Optional properties: 8Optional properties:
11- bank-width : Width (in bytes) of the device. If not present, the width 9- bank-width : Width (in bytes) of the device. If not present, the width
@@ -19,10 +17,10 @@ Example:
19 #address-cells = <1>; 17 #address-cells = <1>;
20 #size-cells = <1>; 18 #size-cells = <1>;
21 reg = <0xd1800000 0x1000 /* FSMC Register */ 19 reg = <0xd1800000 0x1000 /* FSMC Register */
22 0xd2000000 0x4000>; /* NAND Base */ 20 0xd2000000 0x0010 /* NAND Base DATA */
23 reg-names = "fsmc_regs", "nand_data"; 21 0xd2020000 0x0010 /* NAND Base ADDR */
24 st,ale-off = <0x20000>; 22 0xd2010000 0x0010>; /* NAND Base CMD */
25 st,cle-off = <0x10000>; 23 reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
26 24
27 bank-width = <1>; 25 bank-width = <1>;
28 nand-skip-bbtscan; 26 nand-skip-bbtscan;
diff --git a/Documentation/devicetree/bindings/mtd/m25p80.txt b/Documentation/devicetree/bindings/mtd/m25p80.txt
new file mode 100644
index 000000000000..6d3d57609470
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/m25p80.txt
@@ -0,0 +1,29 @@
1* MTD SPI driver for ST M25Pxx (and similar) serial flash chips
2
3Required properties:
4- #address-cells, #size-cells : Must be present if the device has sub-nodes
5 representing partitions.
6- compatible : Should be the manufacturer and the name of the chip. Bear in mind
7 the DT binding is not Linux-only, but in case of Linux, see the
8 "m25p_ids" table in drivers/mtd/devices/m25p80.c for the list of
9 supported chips.
10- reg : Chip-Select number
11- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at
12
13Optional properties:
14- m25p,fast-read : Use the "fast read" opcode to read data from the chip instead
15 of the usual "read" opcode. This opcode is not supported by
16 all chips and support for it can not be detected at runtime.
17 Refer to your chips' datasheet to check if this is supported
18 by your chip.
19
20Example:
21
22 flash: m25p80@0 {
23 #address-cells = <1>;
24 #size-cells = <1>;
25 compatible = "spansion,m25p80";
26 reg = <0>;
27 spi-max-frequency = <40000000>;
28 m25p,fast-read;
29 };
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
index 94de19b8f16b..dab7847fc800 100644
--- a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
+++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
@@ -23,6 +23,9 @@ file systems on embedded devices.
23 unaligned accesses as implemented in the JFFS2 code via memcpy(). 23 unaligned accesses as implemented in the JFFS2 code via memcpy().
24 By defining "no-unaligned-direct-access", the flash will not be 24 By defining "no-unaligned-direct-access", the flash will not be
25 exposed directly to the MTD users (e.g. JFFS2) any more. 25 exposed directly to the MTD users (e.g. JFFS2) any more.
 26 - linux,mtd-name: allows specifying the mtd name for backward compatibility with
 27 physmap-flash drivers, as the boot loader passes the mtd partitions via the old
 28 device name physmap-flash.
26 29
27For JEDEC compatible devices, the following additional properties 30For JEDEC compatible devices, the following additional properties
28are defined: 31are defined:
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
new file mode 100644
index 000000000000..131e8c11d26f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
@@ -0,0 +1,23 @@
1TI SOC ECAP based APWM controller
2
3Required properties:
4- compatible: Must be "ti,am33xx-ecap"
5- #pwm-cells: Should be 3. Number of cells being used to specify PWM property.
6 First cell specifies the per-chip index of the PWM to use, the second
7 cell is the period in nanoseconds and bit 0 in the third cell is used to
8 encode the polarity of PWM output. Set bit 0 of the third in PWM specifier
9 to 1 for inverse polarity & set to 0 for normal polarity.
10- reg: physical base address and size of the registers map.
11
12Optional properties:
13- ti,hwmods: Name of the hwmod associated to the ECAP:
14 "ecap<x>", <x> being the 0-based instance number from the HW spec
15
16Example:
17
18ecap0: ecap@0 {
19 compatible = "ti,am33xx-ecap";
20 #pwm-cells = <3>;
21 reg = <0x48300100 0x80>;
22 ti,hwmods = "ecap0";
23};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
new file mode 100644
index 000000000000..4fc7079d822e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
@@ -0,0 +1,23 @@
1TI SOC EHRPWM based PWM controller
2
3Required properties:
4- compatible : Must be "ti,am33xx-ehrpwm"
5- #pwm-cells: Should be 3. Number of cells being used to specify PWM property.
6 First cell specifies the per-chip index of the PWM to use, the second
7 cell is the period in nanoseconds and bit 0 in the third cell is used to
8 encode the polarity of PWM output. Set bit 0 of the third in PWM specifier
9 to 1 for inverse polarity & set to 0 for normal polarity.
10- reg: physical base address and size of the registers map.
11
12Optional properties:
13- ti,hwmods: Name of the hwmod associated to the EHRPWM:
14 "ehrpwm<x>", <x> being the 0-based instance number from the HW spec
15
16Example:
17
18ehrpwm0: ehrpwm@0 {
19 compatible = "ti,am33xx-ehrpwm";
20 #pwm-cells = <3>;
21 reg = <0x48300200 0x100>;
22 ti,hwmods = "ehrpwm0";
23};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt b/Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt
new file mode 100644
index 000000000000..f7eae77f8354
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt
@@ -0,0 +1,31 @@
1TI SOC based PWM Subsystem
2
3Required properties:
4- compatible: Must be "ti,am33xx-pwmss";
5- reg: physical base address and size of the registers map.
6- address-cells: Specify the number of u32 entries needed in child nodes.
7 Should set to 1.
8- size-cells: specify number of u32 entries needed to specify child nodes size
9 in reg property. Should set to 1.
10- ranges: describes the address mapping of a memory-mapped bus. Should set to
11 physical address map of child's base address, physical address within
12 parent's address space and length of the address map. For am33xx,
13 3 set of child register maps present, ECAP register space, EQEP
14 register space, EHRPWM register space.
15
 16Child nodes should also be populated under the PWMSS DT node.
17
18Example:
19pwmss0: pwmss@48300000 {
20 compatible = "ti,am33xx-pwmss";
21 reg = <0x48300000 0x10>;
22 ti,hwmods = "epwmss0";
23 #address-cells = <1>;
24 #size-cells = <1>;
25 status = "disabled";
26 ranges = <0x48300100 0x48300100 0x80 /* ECAP */
27 0x48300180 0x48300180 0x80 /* EQEP */
28 0x48300200 0x48300200 0x80>; /* EHRPWM */
29
30 /* child nodes go here */
31};
diff --git a/Documentation/devicetree/bindings/pwm/pwm.txt b/Documentation/devicetree/bindings/pwm/pwm.txt
index 73ec962bfe8c..06e67247859a 100644
--- a/Documentation/devicetree/bindings/pwm/pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm.txt
@@ -37,10 +37,21 @@ device:
37 pwm-names = "backlight"; 37 pwm-names = "backlight";
38 }; 38 };
39 39
40Note that in the example above, specifying the "pwm-names" is redundant
41because the name "backlight" would be used as fallback anyway.
42
40pwm-specifier typically encodes the chip-relative PWM number and the PWM 43pwm-specifier typically encodes the chip-relative PWM number and the PWM
41period in nanoseconds. Note that in the example above, specifying the 44period in nanoseconds.
42"pwm-names" is redundant because the name "backlight" would be used as 45
43fallback anyway. 46Optionally, the pwm-specifier can encode a number of flags in a third cell:
47- bit 0: PWM signal polarity (0: normal polarity, 1: inverse polarity)
48
49Example with optional PWM specifier for inverse polarity
50
51 bl: backlight {
52 pwms = <&pwm 0 5000000 1>;
53 pwm-names = "backlight";
54 };
44 55
452) PWM controller nodes 562) PWM controller nodes
46----------------------- 57-----------------------
diff --git a/Documentation/devicetree/bindings/pwm/spear-pwm.txt b/Documentation/devicetree/bindings/pwm/spear-pwm.txt
new file mode 100644
index 000000000000..3ac779d83386
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/spear-pwm.txt
@@ -0,0 +1,18 @@
1== ST SPEAr SoC PWM controller ==
2
3Required properties:
4- compatible: should be one of:
5 - "st,spear320-pwm"
6 - "st,spear1340-pwm"
7- reg: physical base address and length of the controller's registers
8- #pwm-cells: number of cells used to specify PWM which is fixed to 2 on
9 SPEAr. The first cell specifies the per-chip index of the PWM to use and
10 the second cell is the period in nanoseconds.
11
12Example:
13
14 pwm: pwm@a8000000 {
15 compatible ="st,spear320-pwm";
16 reg = <0xa8000000 0x1000>;
17 #pwm-cells = <2>;
18 };
diff --git a/Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt b/Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt
new file mode 100644
index 000000000000..2943ee5fce00
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt
@@ -0,0 +1,17 @@
1Texas Instruments TWL series PWM drivers
2
3Supported PWMs:
4On TWL4030 series: PWM1 and PWM2
5On TWL6030 series: PWM0 and PWM1
6
7Required properties:
8- compatible: "ti,twl4030-pwm" or "ti,twl6030-pwm"
9- #pwm-cells: should be 2. The first cell specifies the per-chip index
10 of the PWM to use and the second cell is the period in nanoseconds.
11
12Example:
13
14twl_pwm: pwm {
15 compatible = "ti,twl6030-pwm";
16 #pwm-cells = <2>;
17};
diff --git a/Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt b/Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt
new file mode 100644
index 000000000000..cb64f3acc10f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt
@@ -0,0 +1,17 @@
1Texas Instruments TWL series PWM drivers connected to LED terminals
2
3Supported PWMs:
4On TWL4030 series: PWMA and PWMB (connected to LEDA and LEDB terminals)
5On TWL6030 series: LED PWM (mainly used as charging indicator LED)
6
7Required properties:
8- compatible: "ti,twl4030-pwmled" or "ti,twl6030-pwmled"
9- #pwm-cells: should be 2. The first cell specifies the per-chip index
10 of the PWM to use and the second cell is the period in nanoseconds.
11
12Example:
13
14twl_pwmled: pwmled {
15 compatible = "ti,twl6030-pwmled";
16 #pwm-cells = <2>;
17};
diff --git a/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt b/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
new file mode 100644
index 000000000000..bcc63678a9a5
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
@@ -0,0 +1,17 @@
1VIA/Wondermedia VT8500/WM8xxx series SoC PWM controller
2
3Required properties:
4- compatible: should be "via,vt8500-pwm"
5- reg: physical base address and length of the controller's registers
6- #pwm-cells: should be 2. The first cell specifies the per-chip index
7 of the PWM to use and the second cell is the period in nanoseconds.
8- clocks: phandle to the PWM source clock
9
10Example:
11
12pwm1: pwm@d8220000 {
13 #pwm-cells = <2>;
14 compatible = "via,vt8500-pwm";
15 reg = <0xd8220000 0x1000>;
16 clocks = <&clkpwm>;
17};
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt
index 8cf24f6f0a99..7b53da5cb75b 100644
--- a/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt
+++ b/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt
@@ -13,7 +13,7 @@ Recommended properties:
13 13
14Example: 14Example:
15 15
16spi@7000d600 { 16spi@7000c380 {
17 compatible = "nvidia,tegra20-sflash"; 17 compatible = "nvidia,tegra20-sflash";
18 reg = <0x7000c380 0x80>; 18 reg = <0x7000c380 0x80>;
19 interrupts = <0 39 0x04>; 19 interrupts = <0 39 0x04>;
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt
index f5b1ad1a1ec3..eefe15e3d95e 100644
--- a/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt
+++ b/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt
@@ -13,7 +13,7 @@ Recommended properties:
13 13
14Example: 14Example:
15 15
16slink@7000d600 { 16spi@7000d600 {
17 compatible = "nvidia,tegra20-slink"; 17 compatible = "nvidia,tegra20-slink";
18 reg = <0x7000d600 0x200>; 18 reg = <0x7000d600 0x200>;
19 interrupts = <0 82 0x04>; 19 interrupts = <0 82 0x04>;
diff --git a/Documentation/devicetree/bindings/spi/spi_atmel.txt b/Documentation/devicetree/bindings/spi/spi_atmel.txt
new file mode 100644
index 000000000000..07e04cdc0c9e
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi_atmel.txt
@@ -0,0 +1,26 @@
1Atmel SPI device
2
3Required properties:
4- compatible : should be "atmel,at91rm9200-spi".
5- reg: Address and length of the register set for the device
6- interrupts: Should contain spi interrupt
7- cs-gpios: chipselects
8
9Example:
10
11spi1: spi@fffcc000 {
12 compatible = "atmel,at91rm9200-spi";
13 reg = <0xfffcc000 0x4000>;
14 interrupts = <13 4 5>;
15 #address-cells = <1>;
16 #size-cells = <0>;
17 cs-gpios = <&pioB 3 0>;
18 status = "okay";
19
20 mmc-slot@0 {
21 compatible = "mmc-spi-slot";
22 reg = <0>;
23 gpios = <&pioC 4 0>; /* CD */
24 spi-max-frequency = <25000000>;
25 };
26};
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index f15cb74c4f78..406d82d5d2bb 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -373,7 +373,7 @@ Protocol: 2.00+
373 1 Loadlin 373 1 Loadlin
374 2 bootsect-loader (0x20, all other values reserved) 374 2 bootsect-loader (0x20, all other values reserved)
375 3 Syslinux 375 3 Syslinux
376 4 Etherboot/gPXE 376 4 Etherboot/gPXE/iPXE
377 5 ELILO 377 5 ELILO
378 7 GRUB 378 7 GRUB
379 8 U-Boot 379 8 U-Boot
@@ -381,6 +381,7 @@ Protocol: 2.00+
381 A Gujin 381 A Gujin
382 B Qemu 382 B Qemu
383 C Arcturus Networks uCbootloader 383 C Arcturus Networks uCbootloader
384 D kexec-tools
384 E Extended (see ext_loader_type) 385 E Extended (see ext_loader_type)
385 F Special (0xFF = undefined) 386 F Special (0xFF = undefined)
386 10 Reserved 387 10 Reserved
diff --git a/Documentation/xtensa/atomctl.txt b/Documentation/xtensa/atomctl.txt
new file mode 100644
index 000000000000..10a8d1ff35ec
--- /dev/null
+++ b/Documentation/xtensa/atomctl.txt
@@ -0,0 +1,44 @@
1We Have Atomic Operation Control (ATOMCTL) Register.
2This register determines the effect of using a S32C1I instruction
3with various combinations of:
4
 5 1. With and without a Coherent Cache Controller which
6 can do Atomic Transactions to the memory internally.
7
 8 2. With and without an Intelligent Memory Controller which
9 can do Atomic Transactions itself.
10
 11The Core comes up with a default value for the three types of cache ops:
12
13 0x28: (WB: Internal, WT: Internal, BY:Exception)
14
15On the FPGA Cards we typically simulate an Intelligent Memory controller
16which can implement RCW transactions. For FPGA cards with an External
 17Memory controller we let it do the atomic operations internally while
18doing a Cached (WB) transaction and use the Memory RCW for un-cached
19operations.
20
 21For systems without a coherent cache controller, non-MX, we always
 22use the memory controller's RCW, though non-MX controllers likely
23support the Internal Operation.
24
25CUSTOMER-WARNING:
26 Virtually all customers buy their memory controllers from vendors that
27 don't support atomic RCW memory transactions and will likely want to
28 configure this register to not use RCW.
29
30Developers might find using RCW in Bypass mode convenient when testing
31with the cache being bypassed; for example studying cache alias problems.
32
33See Section 4.3.12.4 of ISA; Bits:
34
35 WB WT BY
36 5 4 | 3 2 | 1 0
37 2 Bit
38 Field
39 Values WB - Write Back WT - Write Thru BY - Bypass
40--------- --------------- ----------------- ----------------
41 0 Exception Exception Exception
42 1 RCW Transaction RCW Transaction RCW Transaction
43 2 Internal Operation Exception Reserved
44 3 Reserved Reserved Reserved
diff --git a/Makefile b/Makefile
index 540f7b240c77..6f07f4a28b47 100644
--- a/Makefile
+++ b/Makefile
@@ -981,6 +981,12 @@ _modinst_post: _modinst_
981 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modinst 981 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modinst
982 $(call cmd,depmod) 982 $(call cmd,depmod)
983 983
984ifeq ($(CONFIG_MODULE_SIG), y)
985PHONY += modules_sign
986modules_sign:
987 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modsign
988endif
989
984else # CONFIG_MODULES 990else # CONFIG_MODULES
985 991
986# Modules not configured 992# Modules not configured
diff --git a/arch/Kconfig b/arch/Kconfig
index 54ffd0f9df21..8e9e3246b2b4 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -113,6 +113,25 @@ config HAVE_EFFICIENT_UNALIGNED_ACCESS
113 See Documentation/unaligned-memory-access.txt for more 113 See Documentation/unaligned-memory-access.txt for more
114 information on the topic of unaligned memory accesses. 114 information on the topic of unaligned memory accesses.
115 115
116config ARCH_USE_BUILTIN_BSWAP
117 bool
118 help
119 Modern versions of GCC (since 4.4) have builtin functions
120 for handling byte-swapping. Using these, instead of the old
121 inline assembler that the architecture code provides in the
122 __arch_bswapXX() macros, allows the compiler to see what's
123 happening and offers more opportunity for optimisation. In
124 particular, the compiler will be able to combine the byteswap
125 with a nearby load or store and use load-and-swap or
126 store-and-swap instructions if the architecture has them. It
127 should almost *never* result in code which is worse than the
128 hand-coded assembler in <asm/swab.h>. But just in case it
129 does, the use of the builtins is optional.
130
131 Any architecture with load-and-swap or store-and-swap
132 instructions should set this. And it shouldn't hurt to set it
133 on architectures that don't have such instructions.
134
116config HAVE_SYSCALL_WRAPPERS 135config HAVE_SYSCALL_WRAPPERS
117 bool 136 bool
118 137
diff --git a/arch/arm/boot/dts/imx28-cfa10049.dts b/arch/arm/boot/dts/imx28-cfa10049.dts
index b222614ac9e0..bdc80a4453dd 100644
--- a/arch/arm/boot/dts/imx28-cfa10049.dts
+++ b/arch/arm/boot/dts/imx28-cfa10049.dts
@@ -92,6 +92,30 @@
92 status = "okay"; 92 status = "okay";
93 }; 93 };
94 94
95 i2cmux {
96 compatible = "i2c-mux-gpio";
97 #address-cells = <1>;
98 #size-cells = <0>;
99 mux-gpios = <&gpio1 22 0 &gpio1 23 0>;
100 i2c-parent = <&i2c1>;
101
102 i2c@0 {
103 reg = <0>;
104 };
105
106 i2c@1 {
107 reg = <1>;
108 };
109
110 i2c@2 {
111 reg = <2>;
112 };
113
114 i2c@3 {
115 reg = <3>;
116 };
117 };
118
95 usbphy1: usbphy@8007e000 { 119 usbphy1: usbphy@8007e000 {
96 status = "okay"; 120 status = "okay";
97 }; 121 };
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 009096d1d2c3..b4ca60f4eb42 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -73,7 +73,7 @@
73 400000 73 400000
74 500000 74 500000
75 600000 >; 75 600000 >;
76 status = "disable"; 76 status = "disabled";
77 }; 77 };
78 78
79 ahb { 79 ahb {
@@ -118,15 +118,15 @@
118 compatible = "st,spear600-fsmc-nand"; 118 compatible = "st,spear600-fsmc-nand";
119 #address-cells = <1>; 119 #address-cells = <1>;
120 #size-cells = <1>; 120 #size-cells = <1>;
121 reg = <0xb0000000 0x1000 /* FSMC Register */ 121 reg = <0xb0000000 0x1000 /* FSMC Register*/
122 0xb0800000 0x0010>; /* NAND Base */ 122 0xb0800000 0x0010 /* NAND Base DATA */
123 reg-names = "fsmc_regs", "nand_data"; 123 0xb0820000 0x0010 /* NAND Base ADDR */
124 0xb0810000 0x0010>; /* NAND Base CMD */
125 reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
124 interrupts = <0 20 0x4 126 interrupts = <0 20 0x4
125 0 21 0x4 127 0 21 0x4
126 0 22 0x4 128 0 22 0x4
127 0 23 0x4>; 129 0 23 0x4>;
128 st,ale-off = <0x20000>;
129 st,cle-off = <0x10000>;
130 st,mode = <2>; 130 st,mode = <2>;
131 status = "disabled"; 131 status = "disabled";
132 }; 132 };
@@ -144,7 +144,7 @@
144 compatible = "st,pcm-audio"; 144 compatible = "st,pcm-audio";
145 #address-cells = <0>; 145 #address-cells = <0>;
146 #size-cells = <0>; 146 #size-cells = <0>;
147 status = "disable"; 147 status = "disabled";
148 }; 148 };
149 149
150 smi: flash@ea000000 { 150 smi: flash@ea000000 {
diff --git a/arch/arm/boot/dts/spear300.dtsi b/arch/arm/boot/dts/spear300.dtsi
index 090adc656015..f79b3dfaabe6 100644
--- a/arch/arm/boot/dts/spear300.dtsi
+++ b/arch/arm/boot/dts/spear300.dtsi
@@ -38,10 +38,10 @@
38 #address-cells = <1>; 38 #address-cells = <1>;
39 #size-cells = <1>; 39 #size-cells = <1>;
40 reg = <0x94000000 0x1000 /* FSMC Register */ 40 reg = <0x94000000 0x1000 /* FSMC Register */
41 0x80000000 0x0010>; /* NAND Base */ 41 0x80000000 0x0010 /* NAND Base DATA */
42 reg-names = "fsmc_regs", "nand_data"; 42 0x80020000 0x0010 /* NAND Base ADDR */
43 st,ale-off = <0x20000>; 43 0x80010000 0x0010>; /* NAND Base CMD */
44 st,cle-off = <0x10000>; 44 reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
45 status = "disabled"; 45 status = "disabled";
46 }; 46 };
47 47
diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi
index e814e5e97083..ab45b8c81982 100644
--- a/arch/arm/boot/dts/spear310.dtsi
+++ b/arch/arm/boot/dts/spear310.dtsi
@@ -33,10 +33,10 @@
33 #address-cells = <1>; 33 #address-cells = <1>;
34 #size-cells = <1>; 34 #size-cells = <1>;
35 reg = <0x44000000 0x1000 /* FSMC Register */ 35 reg = <0x44000000 0x1000 /* FSMC Register */
36 0x40000000 0x0010>; /* NAND Base */ 36 0x40000000 0x0010 /* NAND Base DATA */
37 reg-names = "fsmc_regs", "nand_data"; 37 0x40020000 0x0010 /* NAND Base ADDR */
38 st,ale-off = <0x10000>; 38 0x40010000 0x0010>; /* NAND Base CMD */
39 st,cle-off = <0x20000>; 39 reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
40 status = "disabled"; 40 status = "disabled";
41 }; 41 };
42 42
diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi
index c056a84deabf..caa5520b1fd4 100644
--- a/arch/arm/boot/dts/spear320.dtsi
+++ b/arch/arm/boot/dts/spear320.dtsi
@@ -40,10 +40,10 @@
40 #address-cells = <1>; 40 #address-cells = <1>;
41 #size-cells = <1>; 41 #size-cells = <1>;
42 reg = <0x4c000000 0x1000 /* FSMC Register */ 42 reg = <0x4c000000 0x1000 /* FSMC Register */
43 0x50000000 0x0010>; /* NAND Base */ 43 0x50000000 0x0010 /* NAND Base DATA */
44 reg-names = "fsmc_regs", "nand_data"; 44 0x50020000 0x0010 /* NAND Base ADDR */
45 st,ale-off = <0x20000>; 45 0x50010000 0x0010>; /* NAND Base CMD */
46 st,cle-off = <0x10000>; 46 reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
47 status = "disabled"; 47 status = "disabled";
48 }; 48 };
49 49
diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
index e051dde5181f..19f99dc4115e 100644
--- a/arch/arm/boot/dts/spear600.dtsi
+++ b/arch/arm/boot/dts/spear600.dtsi
@@ -76,10 +76,10 @@
76 #address-cells = <1>; 76 #address-cells = <1>;
77 #size-cells = <1>; 77 #size-cells = <1>;
78 reg = <0xd1800000 0x1000 /* FSMC Register */ 78 reg = <0xd1800000 0x1000 /* FSMC Register */
79 0xd2000000 0x4000>; /* NAND Base */ 79 0xd2000000 0x0010 /* NAND Base DATA */
80 reg-names = "fsmc_regs", "nand_data"; 80 0xd2020000 0x0010 /* NAND Base ADDR */
81 st,ale-off = <0x20000>; 81 0xd2010000 0x0010>; /* NAND Base CMD */
82 st,cle-off = <0x10000>; 82 reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
83 status = "disabled"; 83 status = "disabled";
84 }; 84 };
85 85
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 240b25eea565..86cfd2959c47 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -57,7 +57,7 @@ CONFIG_MTD_CHAR=y
57CONFIG_MTD_BLOCK=y 57CONFIG_MTD_BLOCK=y
58CONFIG_MTD_NAND=y 58CONFIG_MTD_NAND=y
59CONFIG_MTD_NAND_ECC_SMC=y 59CONFIG_MTD_NAND_ECC_SMC=y
60CONFIG_MTD_NAND_NOMADIK=y 60CONFIG_MTD_NAND_FSMC=y
61CONFIG_MTD_ONENAND=y 61CONFIG_MTD_ONENAND=y
62CONFIG_MTD_ONENAND_VERIFY_WRITE=y 62CONFIG_MTD_ONENAND_VERIFY_WRITE=y
63CONFIG_MTD_ONENAND_GENERIC=y 63CONFIG_MTD_ONENAND_GENERIC=y
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index ac03bdb4ae44..4da7cde70b5d 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -405,6 +405,7 @@
405#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376) 405#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
406#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) 406#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
407 /* 378 for kcmp */ 407 /* 378 for kcmp */
408#define __NR_finit_module (__NR_SYSCALL_BASE+379)
408 409
409/* 410/*
410 * This may need to be greater than __NR_last_syscall+1 in order to 411 * This may need to be greater than __NR_last_syscall+1 in order to
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 5935b6a02e6e..a4fda4e7a372 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -388,6 +388,7 @@
388 CALL(sys_process_vm_readv) 388 CALL(sys_process_vm_readv)
389 CALL(sys_process_vm_writev) 389 CALL(sys_process_vm_writev)
390 CALL(sys_ni_syscall) /* reserved for sys_kcmp */ 390 CALL(sys_ni_syscall) /* reserved for sys_kcmp */
391 CALL(sys_finit_module)
391#ifndef syscalls_counted 392#ifndef syscalls_counted
392.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 393.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
393#define syscalls_counted 394#define syscalls_counted
diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
index 5ccdf53c5a9d..98167a4319f7 100644
--- a/arch/arm/mach-nomadik/board-nhk8815.c
+++ b/arch/arm/mach-nomadik/board-nhk8815.c
@@ -19,6 +19,7 @@
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20#include <linux/mtd/mtd.h> 20#include <linux/mtd/mtd.h>
21#include <linux/mtd/nand.h> 21#include <linux/mtd/nand.h>
22#include <linux/mtd/fsmc.h>
22#include <linux/mtd/onenand.h> 23#include <linux/mtd/onenand.h>
23#include <linux/mtd/partitions.h> 24#include <linux/mtd/partitions.h>
24#include <linux/i2c.h> 25#include <linux/i2c.h>
@@ -33,7 +34,6 @@
33#include <asm/mach/arch.h> 34#include <asm/mach/arch.h>
34#include <asm/mach/flash.h> 35#include <asm/mach/flash.h>
35#include <asm/mach/time.h> 36#include <asm/mach/time.h>
36#include <mach/fsmc.h>
37#include <mach/irqs.h> 37#include <mach/irqs.h>
38 38
39#include "cpu-8815.h" 39#include "cpu-8815.h"
@@ -42,39 +42,34 @@
42#define SRC_CR_INIT_MASK 0x00007fff 42#define SRC_CR_INIT_MASK 0x00007fff
43#define SRC_CR_INIT_VAL 0x2aaa8000 43#define SRC_CR_INIT_VAL 0x2aaa8000
44 44
45#define ALE_OFF 0x1000000
46#define CLE_OFF 0x800000
47
45/* These addresses span 16MB, so use three individual pages */ 48/* These addresses span 16MB, so use three individual pages */
46static struct resource nhk8815_nand_resources[] = { 49static struct resource nhk8815_nand_resources[] = {
47 { 50 {
51 .name = "nand_data",
52 .start = 0x40000000,
53 .end = 0x40000000 + SZ_16K - 1,
54 .flags = IORESOURCE_MEM,
55 }, {
48 .name = "nand_addr", 56 .name = "nand_addr",
49 .start = NAND_IO_ADDR, 57 .start = 0x40000000 + ALE_OFF,
 50 .end = NAND_IO_ADDR + 0xfff, 58 .end = 0x40000000 + ALE_OFF + SZ_16K - 1,
51 .flags = IORESOURCE_MEM, 59 .flags = IORESOURCE_MEM,
52 }, { 60 }, {
53 .name = "nand_cmd", 61 .name = "nand_cmd",
54 .start = NAND_IO_CMD, 62 .start = 0x40000000 + CLE_OFF,
55 .end = NAND_IO_CMD + 0xfff, 63 .end = 0x40000000 + CLE_OFF + SZ_16K - 1,
56 .flags = IORESOURCE_MEM, 64 .flags = IORESOURCE_MEM,
57 }, { 65 }, {
58 .name = "nand_data", 66 .name = "fsmc_regs",
59 .start = NAND_IO_DATA, 67 .start = NOMADIK_FSMC_BASE,
60 .end = NAND_IO_DATA + 0xfff, 68 .end = NOMADIK_FSMC_BASE + SZ_4K - 1,
61 .flags = IORESOURCE_MEM, 69 .flags = IORESOURCE_MEM,
62 } 70 },
63}; 71};
64 72
65static int nhk8815_nand_init(void)
66{
67 /* FSMC setup for nand chip select (8-bit nand in 8815NHK) */
68 writel(0x0000000E, FSMC_PCR(0));
69 writel(0x000D0A00, FSMC_PMEM(0));
70 writel(0x00100A00, FSMC_PATT(0));
71
72 /* enable access to the chip select area */
73 writel(readl(FSMC_PCR(0)) | 0x04, FSMC_PCR(0));
74
75 return 0;
76}
77
78/* 73/*
79 * These partitions are the same as those used in the 2.6.20 release 74 * These partitions are the same as those used in the 2.6.20 release
80 * shipped by the vendor; the first two partitions are mandated 75 * shipped by the vendor; the first two partitions are mandated
@@ -108,20 +103,28 @@ static struct mtd_partition nhk8815_partitions[] = {
108 } 103 }
109}; 104};
110 105
111static struct nomadik_nand_platform_data nhk8815_nand_data = { 106static struct fsmc_nand_timings nhk8815_nand_timings = {
112 .parts = nhk8815_partitions, 107 .thiz = 0,
113 .nparts = ARRAY_SIZE(nhk8815_partitions), 108 .thold = 0x10,
114 .options = NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING, 109 .twait = 0x0A,
115 .init = nhk8815_nand_init, 110 .tset = 0,
111};
112
113static struct fsmc_nand_platform_data nhk8815_nand_platform_data = {
114 .nand_timings = &nhk8815_nand_timings,
115 .partitions = nhk8815_partitions,
116 .nr_partitions = ARRAY_SIZE(nhk8815_partitions),
117 .width = FSMC_NAND_BW8,
116}; 118};
117 119
118static struct platform_device nhk8815_nand_device = { 120static struct platform_device nhk8815_nand_device = {
119 .name = "nomadik_nand", 121 .name = "fsmc-nand",
120 .dev = { 122 .id = -1,
121 .platform_data = &nhk8815_nand_data, 123 .resource = nhk8815_nand_resources,
124 .num_resources = ARRAY_SIZE(nhk8815_nand_resources),
125 .dev = {
126 .platform_data = &nhk8815_nand_platform_data,
122 }, 127 },
123 .resource = nhk8815_nand_resources,
124 .num_resources = ARRAY_SIZE(nhk8815_nand_resources),
125}; 128};
126 129
127/* These are the partitions for the OneNand device, different from above */ 130/* These are the partitions for the OneNand device, different from above */
@@ -176,6 +179,10 @@ static struct platform_device nhk8815_onenand_device = {
176 .num_resources = ARRAY_SIZE(nhk8815_onenand_resource), 179 .num_resources = ARRAY_SIZE(nhk8815_onenand_resource),
177}; 180};
178 181
182/* bus control reg. and bus timing reg. for CS0..CS3 */
183#define FSMC_BCR(x) (NOMADIK_FSMC_VA + (x << 3))
184#define FSMC_BTR(x) (NOMADIK_FSMC_VA + (x << 3) + 0x04)
185
179static void __init nhk8815_onenand_init(void) 186static void __init nhk8815_onenand_init(void)
180{ 187{
181#ifdef CONFIG_MTD_ONENAND 188#ifdef CONFIG_MTD_ONENAND
diff --git a/arch/arm/mach-nomadik/include/mach/fsmc.h b/arch/arm/mach-nomadik/include/mach/fsmc.h
deleted file mode 100644
index 8c2c05183685..000000000000
--- a/arch/arm/mach-nomadik/include/mach/fsmc.h
+++ /dev/null
@@ -1,29 +0,0 @@
1
2/* Definitions for the Nomadik FSMC "Flexible Static Memory controller" */
3
4#ifndef __ASM_ARCH_FSMC_H
5#define __ASM_ARCH_FSMC_H
6
7#include <mach/hardware.h>
8/*
9 * Register list
10 */
11
12/* bus control reg. and bus timing reg. for CS0..CS3 */
13#define FSMC_BCR(x) (NOMADIK_FSMC_VA + (x << 3))
14#define FSMC_BTR(x) (NOMADIK_FSMC_VA + (x << 3) + 0x04)
15
16/* PC-card and NAND:
17 * PCR = control register
18 * PMEM = memory timing
19 * PATT = attribute timing
20 * PIO = I/O timing
21 * PECCR = ECC result
22 */
23#define FSMC_PCR(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x00)
24#define FSMC_PMEM(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x08)
25#define FSMC_PATT(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x0c)
26#define FSMC_PIO(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x10)
27#define FSMC_PECCR(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x14)
28
29#endif /* __ASM_ARCH_FSMC_H */
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index a4e167c55c1d..0abb30fe399c 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -16,10 +16,12 @@
16#include <linux/gpio.h> 16#include <linux/gpio.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/irq.h>
19#include <linux/stddef.h> 20#include <linux/stddef.h>
20#include <linux/i2c.h> 21#include <linux/i2c.h>
21#include <linux/spi/spi.h> 22#include <linux/spi/spi.h>
22#include <linux/usb/musb.h> 23#include <linux/usb/musb.h>
24#include <linux/platform_data/i2c-cbus-gpio.h>
23#include <linux/platform_data/spi-omap2-mcspi.h> 25#include <linux/platform_data/spi-omap2-mcspi.h>
24#include <linux/platform_data/mtd-onenand-omap2.h> 26#include <linux/platform_data/mtd-onenand-omap2.h>
25#include <linux/mfd/menelaus.h> 27#include <linux/mfd/menelaus.h>
@@ -40,6 +42,45 @@
40#define TUSB6010_GPIO_ENABLE 0 42#define TUSB6010_GPIO_ENABLE 0
41#define TUSB6010_DMACHAN 0x3f 43#define TUSB6010_DMACHAN 0x3f
42 44
45#if defined(CONFIG_I2C_CBUS_GPIO) || defined(CONFIG_I2C_CBUS_GPIO_MODULE)
46static struct i2c_cbus_platform_data n8x0_cbus_data = {
47 .clk_gpio = 66,
48 .dat_gpio = 65,
49 .sel_gpio = 64,
50};
51
52static struct platform_device n8x0_cbus_device = {
53 .name = "i2c-cbus-gpio",
54 .id = 3,
55 .dev = {
56 .platform_data = &n8x0_cbus_data,
57 },
58};
59
60static struct i2c_board_info n8x0_i2c_board_info_3[] __initdata = {
61 {
62 I2C_BOARD_INFO("retu-mfd", 0x01),
63 },
64};
65
66static void __init n8x0_cbus_init(void)
67{
68 const int retu_irq_gpio = 108;
69
70 if (gpio_request_one(retu_irq_gpio, GPIOF_IN, "Retu IRQ"))
71 return;
72 irq_set_irq_type(gpio_to_irq(retu_irq_gpio), IRQ_TYPE_EDGE_RISING);
73 n8x0_i2c_board_info_3[0].irq = gpio_to_irq(retu_irq_gpio);
74 i2c_register_board_info(3, n8x0_i2c_board_info_3,
75 ARRAY_SIZE(n8x0_i2c_board_info_3));
76 platform_device_register(&n8x0_cbus_device);
77}
78#else /* CONFIG_I2C_CBUS_GPIO */
79static void __init n8x0_cbus_init(void)
80{
81}
82#endif /* CONFIG_I2C_CBUS_GPIO */
83
43#if defined(CONFIG_USB_MUSB_TUSB6010) || defined(CONFIG_USB_MUSB_TUSB6010_MODULE) 84#if defined(CONFIG_USB_MUSB_TUSB6010) || defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
44/* 85/*
45 * Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and 86 * Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and
@@ -678,6 +719,7 @@ static void __init n8x0_init_machine(void)
678 gpmc_onenand_init(board_onenand_data); 719 gpmc_onenand_init(board_onenand_data);
679 n8x0_mmc_init(); 720 n8x0_mmc_init();
680 n8x0_usb_init(); 721 n8x0_usb_init();
722 n8x0_cbus_init();
681} 723}
682 724
683MACHINE_START(NOKIA_N800, "Nokia N800") 725MACHINE_START(NOKIA_N800, "Nokia N800")
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 60529e0b3d67..cf07e289b4ea 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -256,6 +256,11 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
256 }, 256 },
257}; 257};
258 258
259static struct platform_device rx51_battery_device = {
260 .name = "rx51-battery",
261 .id = -1,
262};
263
259static void rx51_charger_set_power(bool on) 264static void rx51_charger_set_power(bool on)
260{ 265{
261 gpio_set_value(RX51_USB_TRANSCEIVER_RST_GPIO, on); 266 gpio_set_value(RX51_USB_TRANSCEIVER_RST_GPIO, on);
@@ -277,6 +282,7 @@ static void __init rx51_charger_init(void)
277 WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO, 282 WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
278 GPIOF_OUT_INIT_HIGH, "isp1704_reset")); 283 GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
279 284
285 platform_device_register(&rx51_battery_device);
280 platform_device_register(&rx51_charger_device); 286 platform_device_register(&rx51_charger_device);
281} 287}
282 288
diff --git a/arch/arm/mach-omap2/i2c.c b/arch/arm/mach-omap2/i2c.c
index fbb9b152cd5e..df6d6acbc9ed 100644
--- a/arch/arm/mach-omap2/i2c.c
+++ b/arch/arm/mach-omap2/i2c.c
@@ -120,6 +120,16 @@ static int __init omap_i2c_nr_ports(void)
120 return ports; 120 return ports;
121} 121}
122 122
123/*
124 * XXX This function is a temporary compatibility wrapper - only
125 * needed until the I2C driver can be converted to call
126 * omap_pm_set_max_dev_wakeup_lat() and handle a return code.
127 */
128static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t)
129{
130 omap_pm_set_max_mpu_wakeup_lat(dev, t);
131}
132
123static const char name[] = "omap_i2c"; 133static const char name[] = "omap_i2c";
124 134
125int __init omap_i2c_add_bus(struct omap_i2c_bus_platform_data *i2c_pdata, 135int __init omap_i2c_add_bus(struct omap_i2c_bus_platform_data *i2c_pdata,
@@ -157,6 +167,15 @@ int __init omap_i2c_add_bus(struct omap_i2c_bus_platform_data *i2c_pdata,
157 dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr; 167 dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr;
158 pdata->flags = dev_attr->flags; 168 pdata->flags = dev_attr->flags;
159 169
170 /*
171 * When waiting for completion of a i2c transfer, we need to
172 * set a wake up latency constraint for the MPU. This is to
173 * ensure quick enough wakeup from idle, when transfer
174 * completes.
175 * Only omap3 has support for constraints
176 */
177 if (cpu_is_omap34xx())
178 pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat;
160 pdev = omap_device_build(name, bus_id, oh, pdata, 179 pdev = omap_device_build(name, bus_id, oh, pdata,
161 sizeof(struct omap_i2c_bus_platform_data), 180 sizeof(struct omap_i2c_bus_platform_data),
162 NULL, 0, 0); 181 NULL, 0, 0);
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index 6c8fa70ddadd..d2d3840557c3 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -77,8 +77,7 @@ static struct omap_hwmod_class i2c_class = {
77 77
78static struct omap_i2c_dev_attr i2c_dev_attr = { 78static struct omap_i2c_dev_attr i2c_dev_attr = {
79 .fifo_depth = 8, /* bytes */ 79 .fifo_depth = 8, /* bytes */
80 .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | 80 .flags = OMAP_I2C_FLAG_BUS_SHIFT_2 |
81 OMAP_I2C_FLAG_BUS_SHIFT_2 |
82 OMAP_I2C_FLAG_FORCE_19200_INT_CLK, 81 OMAP_I2C_FLAG_FORCE_19200_INT_CLK,
83}; 82};
84 83
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 32820d89f5b4..081c71edddf4 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -1118,8 +1118,7 @@ static struct omap_hwmod_class i2c_class = {
1118}; 1118};
1119 1119
1120static struct omap_i2c_dev_attr i2c_dev_attr = { 1120static struct omap_i2c_dev_attr i2c_dev_attr = {
1121 .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE | 1121 .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE,
1122 OMAP_I2C_FLAG_RESET_REGS_POSTIDLE,
1123}; 1122};
1124 1123
1125/* i2c1 */ 1124/* i2c1 */
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index ec4499e5a4c9..8bb2628df34e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -794,9 +794,7 @@ static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
794/* I2C1 */ 794/* I2C1 */
795static struct omap_i2c_dev_attr i2c1_dev_attr = { 795static struct omap_i2c_dev_attr i2c1_dev_attr = {
796 .fifo_depth = 8, /* bytes */ 796 .fifo_depth = 8, /* bytes */
797 .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | 797 .flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
798 OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
799 OMAP_I2C_FLAG_BUS_SHIFT_2,
800}; 798};
801 799
802static struct omap_hwmod omap3xxx_i2c1_hwmod = { 800static struct omap_hwmod omap3xxx_i2c1_hwmod = {
@@ -821,9 +819,7 @@ static struct omap_hwmod omap3xxx_i2c1_hwmod = {
821/* I2C2 */ 819/* I2C2 */
822static struct omap_i2c_dev_attr i2c2_dev_attr = { 820static struct omap_i2c_dev_attr i2c2_dev_attr = {
823 .fifo_depth = 8, /* bytes */ 821 .fifo_depth = 8, /* bytes */
824 .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | 822 .flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
825 OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
826 OMAP_I2C_FLAG_BUS_SHIFT_2,
827}; 823};
828 824
829static struct omap_hwmod omap3xxx_i2c2_hwmod = { 825static struct omap_hwmod omap3xxx_i2c2_hwmod = {
@@ -848,9 +844,7 @@ static struct omap_hwmod omap3xxx_i2c2_hwmod = {
848/* I2C3 */ 844/* I2C3 */
849static struct omap_i2c_dev_attr i2c3_dev_attr = { 845static struct omap_i2c_dev_attr i2c3_dev_attr = {
850 .fifo_depth = 64, /* bytes */ 846 .fifo_depth = 64, /* bytes */
851 .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | 847 .flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
852 OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
853 OMAP_I2C_FLAG_BUS_SHIFT_2,
854}; 848};
855 849
856static struct omap_hwmod_irq_info i2c3_mpu_irqs[] = { 850static struct omap_hwmod_irq_info i2c3_mpu_irqs[] = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index eb61cfd9452b..272b0178dba6 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -1529,8 +1529,7 @@ static struct omap_hwmod_class omap44xx_i2c_hwmod_class = {
1529}; 1529};
1530 1530
1531static struct omap_i2c_dev_attr i2c_dev_attr = { 1531static struct omap_i2c_dev_attr i2c_dev_attr = {
1532 .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE | 1532 .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE,
1533 OMAP_I2C_FLAG_RESET_REGS_POSTIDLE,
1534}; 1533};
1535 1534
1536/* i2c1 */ 1535/* i2c1 */
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index 12f3994c43db..8b204ae69002 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -250,6 +250,18 @@ static struct resource rtc_resources[] = {
250 */ 250 */
251static struct resource fsmc_resources[] = { 251static struct resource fsmc_resources[] = {
252 { 252 {
253 .name = "nand_addr",
254 .start = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_ALE,
255 .end = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_ALE + SZ_16K - 1,
256 .flags = IORESOURCE_MEM,
257 },
258 {
259 .name = "nand_cmd",
260 .start = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_CLE,
261 .end = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_CLE + SZ_16K - 1,
262 .flags = IORESOURCE_MEM,
263 },
264 {
253 .name = "nand_data", 265 .name = "nand_data",
254 .start = U300_NAND_CS0_PHYS_BASE, 266 .start = U300_NAND_CS0_PHYS_BASE,
255 .end = U300_NAND_CS0_PHYS_BASE + SZ_16K - 1, 267 .end = U300_NAND_CS0_PHYS_BASE + SZ_16K - 1,
@@ -1492,8 +1504,6 @@ static struct fsmc_nand_platform_data nand_platform_data = {
1492 .nr_partitions = ARRAY_SIZE(u300_partitions), 1504 .nr_partitions = ARRAY_SIZE(u300_partitions),
1493 .options = NAND_SKIP_BBTSCAN, 1505 .options = NAND_SKIP_BBTSCAN,
1494 .width = FSMC_NAND_BW8, 1506 .width = FSMC_NAND_BW8,
1495 .ale_off = PLAT_NAND_ALE,
1496 .cle_off = PLAT_NAND_CLE,
1497}; 1507};
1498 1508
1499static struct platform_device nand_device = { 1509static struct platform_device nand_device = {
diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
index 32567bc2a421..ac12ae2b9286 100644
--- a/arch/cris/include/asm/io.h
+++ b/arch/cris/include/asm/io.h
@@ -133,12 +133,39 @@ static inline void writel(unsigned int b, volatile void __iomem *addr)
133#define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0) 133#define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0)
134#define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0) 134#define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0)
135#define insl(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,4,count) : 0) 135#define insl(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,4,count) : 0)
136#define outb(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,1,1) 136static inline void outb(unsigned char data, unsigned int port)
137#define outw(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,2,1) 137{
138#define outl(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,4,1) 138 if (cris_iops)
139#define outsb(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,1,count) 139 cris_iops->write_io(port, (void *) &data, 1, 1);
140#define outsw(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,2,count) 140}
141#define outsl(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,3,count) 141static inline void outw(unsigned short data, unsigned int port)
142{
143 if (cris_iops)
144 cris_iops->write_io(port, (void *) &data, 2, 1);
145}
146static inline void outl(unsigned int data, unsigned int port)
147{
148 if (cris_iops)
149 cris_iops->write_io(port, (void *) &data, 4, 1);
150}
151static inline void outsb(unsigned int port, const void *addr,
152 unsigned long count)
153{
154 if (cris_iops)
155 cris_iops->write_io(port, (void *)addr, 1, count);
156}
157static inline void outsw(unsigned int port, const void *addr,
158 unsigned long count)
159{
160 if (cris_iops)
161 cris_iops->write_io(port, (void *)addr, 2, count);
162}
163static inline void outsl(unsigned int port, const void *addr,
164 unsigned long count)
165{
166 if (cris_iops)
167 cris_iops->write_io(port, (void *)addr, 4, count);
168}
142 169
143/* 170/*
144 * Convert a physical pointer to a virtual kernel pointer for /dev/mem 171 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
diff --git a/arch/cris/kernel/module.c b/arch/cris/kernel/module.c
index 37400f5869e6..51123f985eb5 100644
--- a/arch/cris/kernel/module.c
+++ b/arch/cris/kernel/module.c
@@ -32,8 +32,6 @@
32#ifdef CONFIG_ETRAX_KMALLOCED_MODULES 32#ifdef CONFIG_ETRAX_KMALLOCED_MODULES
33void *module_alloc(unsigned long size) 33void *module_alloc(unsigned long size)
34{ 34{
35 if (size == 0)
36 return NULL;
37 return kmalloc(size, GFP_KERNEL); 35 return kmalloc(size, GFP_KERNEL);
38} 36}
39 37
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 04bef4d25b4a..0ae445087607 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -3,6 +3,7 @@ config H8300
3 default y 3 default y
4 select HAVE_IDE 4 select HAVE_IDE
5 select HAVE_GENERIC_HARDIRQS 5 select HAVE_GENERIC_HARDIRQS
6 select GENERIC_ATOMIC64
6 select HAVE_UID16 7 select HAVE_UID16
7 select ARCH_WANT_IPC_PARSE_VERSION 8 select ARCH_WANT_IPC_PARSE_VERSION
8 select GENERIC_IRQ_SHOW 9 select GENERIC_IRQ_SHOW
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 5e34ccf39a49..2a625fb063e1 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -214,8 +214,6 @@ static inline int reassemble_22(int as22)
214 214
215void *module_alloc(unsigned long size) 215void *module_alloc(unsigned long size)
216{ 216{
217 if (size == 0)
218 return NULL;
219 /* using RWX means less protection for modules, but it's 217 /* using RWX means less protection for modules, but it's
220 * easier than trying to map the text, data, init_text and 218 * easier than trying to map the text, data, init_text and
221 * init_data correctly */ 219 * init_data correctly */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index cec8aae5cbf8..97909d3b1d7b 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -356,3 +356,4 @@ COMPAT_SYS_SPU(sendmmsg)
356SYSCALL_SPU(setns) 356SYSCALL_SPU(setns)
357COMPAT_SYS(process_vm_readv) 357COMPAT_SYS(process_vm_readv)
358COMPAT_SYS(process_vm_writev) 358COMPAT_SYS(process_vm_writev)
359SYSCALL(finit_module)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index bcbbe413c606..29365e15ed7c 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define __NR_syscalls 353 15#define __NR_syscalls 354
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18#define NR_syscalls __NR_syscalls 18#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 380b5d37a904..8c478c6c6b1e 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -375,6 +375,7 @@
375#define __NR_setns 350 375#define __NR_setns 350
376#define __NR_process_vm_readv 351 376#define __NR_process_vm_readv 351
377#define __NR_process_vm_writev 352 377#define __NR_process_vm_writev 352
378#define __NR_finit_module 353
378 379
379 380
380#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 381#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index f1ddc0d23679..4435488ebe25 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -43,10 +43,6 @@ void *module_alloc(unsigned long size)
43{ 43{
44 void *ret; 44 void *ret;
45 45
46 /* We handle the zero case fine, unlike vmalloc */
47 if (size == 0)
48 return NULL;
49
50 ret = module_map(size); 46 ret = module_map(size);
51 if (ret) 47 if (ret)
52 memset(ret, 0, size); 48 memset(ret, 0, size);
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index b73e1039c911..ff8a93408823 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -170,4 +170,6 @@ do { \
170 170
171#endif /* CONFIG_COMPAT */ 171#endif /* CONFIG_COMPAT */
172 172
173#define CORE_DUMP_USE_REGSET
174
173#endif /* _ASM_TILE_ELF_H */ 175#endif /* _ASM_TILE_ELF_H */
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index 1a4fd9ab0ee1..5ce052e16b7b 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -24,8 +24,7 @@ typedef unsigned long pt_reg_t;
24#include <uapi/asm/ptrace.h> 24#include <uapi/asm/ptrace.h>
25 25
26#define PTRACE_O_MASK_TILE (PTRACE_O_TRACEMIGRATE) 26#define PTRACE_O_MASK_TILE (PTRACE_O_TRACEMIGRATE)
27#define PT_TRACE_MIGRATE 0x00080000 27#define PT_TRACE_MIGRATE PT_EVENT_FLAG(PTRACE_EVENT_MIGRATE)
28#define PT_TRACE_MASK_TILE (PT_TRACE_MIGRATE)
29 28
30/* Flag bits in pt_regs.flags */ 29/* Flag bits in pt_regs.flags */
31#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */ 30#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */
diff --git a/arch/tile/include/uapi/asm/ptrace.h b/arch/tile/include/uapi/asm/ptrace.h
index c717d0fec72e..7757e1985fb6 100644
--- a/arch/tile/include/uapi/asm/ptrace.h
+++ b/arch/tile/include/uapi/asm/ptrace.h
@@ -81,8 +81,14 @@ struct pt_regs {
81#define PTRACE_SETFPREGS 15 81#define PTRACE_SETFPREGS 15
82 82
83/* Support TILE-specific ptrace options, with events starting at 16. */ 83/* Support TILE-specific ptrace options, with events starting at 16. */
84#define PTRACE_O_TRACEMIGRATE 0x00010000
85#define PTRACE_EVENT_MIGRATE 16 84#define PTRACE_EVENT_MIGRATE 16
85#define PTRACE_O_TRACEMIGRATE (1 << PTRACE_EVENT_MIGRATE)
86 86
87/*
88 * Flag bits in pt_regs.flags that are part of the ptrace API.
89 * We start our numbering higher up to avoid confusion with the
90 * non-ABI kernel-internal values that use the low 16 bits.
91 */
92#define PT_FLAGS_COMPAT 0x10000 /* process is an -m32 compat process */
87 93
88#endif /* _UAPI_ASM_TILE_PTRACE_H */ 94#endif /* _UAPI_ASM_TILE_PTRACE_H */
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 243ffebe38d6..4918d91bc3a6 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -42,8 +42,6 @@ void *module_alloc(unsigned long size)
42 int i = 0; 42 int i = 0;
43 int npages; 43 int npages;
44 44
45 if (size == 0)
46 return NULL;
47 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; 45 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
48 pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL); 46 pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
49 if (pages == NULL) 47 if (pages == NULL)
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 759822687e8f..aac1cd586966 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -245,7 +245,7 @@ static void __devinit fixup_read_and_payload_sizes(void)
245 u16 new_values; 245 u16 new_values;
246 246
247 /* Scan for the smallest maximum payload size. */ 247 /* Scan for the smallest maximum payload size. */
248 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 248 for_each_pci_dev(dev) {
249 u32 devcap; 249 u32 devcap;
250 int max_payload; 250 int max_payload;
251 251
@@ -260,7 +260,7 @@ static void __devinit fixup_read_and_payload_sizes(void)
260 260
261 /* Now, set the max_payload_size for all devices to that value. */ 261 /* Now, set the max_payload_size for all devices to that value. */
262 new_values = (max_read_size << 12) | (smallest_max_payload << 5); 262 new_values = (max_read_size << 12) | (smallest_max_payload << 5);
263 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) 263 for_each_pci_dev(dev)
264 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, 264 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
265 PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ, 265 PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ,
266 new_values); 266 new_values);
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index 2ba6d052f85d..94810d4a6332 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -1047,8 +1047,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
1047} 1047}
1048 1048
1049/* Called for each device after PCI setup is done. */ 1049/* Called for each device after PCI setup is done. */
1050static void __init 1050static void pcibios_fixup_final(struct pci_dev *pdev)
1051pcibios_fixup_final(struct pci_dev *pdev)
1052{ 1051{
1053 set_dma_ops(&pdev->dev, gx_pci_dma_map_ops); 1052 set_dma_ops(&pdev->dev, gx_pci_dma_map_ops);
1054 set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET); 1053 set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index e92e40527d6d..9835312d5a91 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -19,7 +19,10 @@
19#include <linux/kprobes.h> 19#include <linux/kprobes.h>
20#include <linux/compat.h> 20#include <linux/compat.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/regset.h>
23#include <linux/elf.h>
22#include <asm/traps.h> 24#include <asm/traps.h>
25#include <arch/chip.h>
23 26
24void user_enable_single_step(struct task_struct *child) 27void user_enable_single_step(struct task_struct *child)
25{ 28{
@@ -45,6 +48,100 @@ void ptrace_disable(struct task_struct *child)
45 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 48 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
46} 49}
47 50
51/*
52 * Get registers from task and ready the result for userspace.
53 * Note that we localize the API issues to getregs() and putregs() at
54 * some cost in performance, e.g. we need a full pt_regs copy for
55 * PEEKUSR, and two copies for POKEUSR. But in general we expect
56 * GETREGS/PUTREGS to be the API of choice anyway.
57 */
58static char *getregs(struct task_struct *child, struct pt_regs *uregs)
59{
60 *uregs = *task_pt_regs(child);
61
62 /* Set up flags ABI bits. */
63 uregs->flags = 0;
64#ifdef CONFIG_COMPAT
65 if (task_thread_info(child)->status & TS_COMPAT)
66 uregs->flags |= PT_FLAGS_COMPAT;
67#endif
68
69 return (char *)uregs;
70}
71
72/* Put registers back to task. */
73static void putregs(struct task_struct *child, struct pt_regs *uregs)
74{
75 struct pt_regs *regs = task_pt_regs(child);
76
77 /* Don't allow overwriting the kernel-internal flags word. */
78 uregs->flags = regs->flags;
79
80 /* Only allow setting the ICS bit in the ex1 word. */
81 uregs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(uregs->ex1));
82
83 *regs = *uregs;
84}
85
86enum tile_regset {
87 REGSET_GPR,
88};
89
90static int tile_gpr_get(struct task_struct *target,
91 const struct user_regset *regset,
92 unsigned int pos, unsigned int count,
93 void *kbuf, void __user *ubuf)
94{
95 struct pt_regs regs;
96
97 getregs(target, &regs);
98
99 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &regs, 0,
100 sizeof(regs));
101}
102
103static int tile_gpr_set(struct task_struct *target,
104 const struct user_regset *regset,
105 unsigned int pos, unsigned int count,
106 const void *kbuf, const void __user *ubuf)
107{
108 int ret;
109 struct pt_regs regs;
110
111 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
112 sizeof(regs));
113 if (ret)
114 return ret;
115
116 putregs(target, &regs);
117
118 return 0;
119}
120
121static const struct user_regset tile_user_regset[] = {
122 [REGSET_GPR] = {
123 .core_note_type = NT_PRSTATUS,
124 .n = ELF_NGREG,
125 .size = sizeof(elf_greg_t),
126 .align = sizeof(elf_greg_t),
127 .get = tile_gpr_get,
128 .set = tile_gpr_set,
129 },
130};
131
132static const struct user_regset_view tile_user_regset_view = {
133 .name = CHIP_ARCH_NAME,
134 .e_machine = ELF_ARCH,
135 .ei_osabi = ELF_OSABI,
136 .regsets = tile_user_regset,
137 .n = ARRAY_SIZE(tile_user_regset),
138};
139
140const struct user_regset_view *task_user_regset_view(struct task_struct *task)
141{
142 return &tile_user_regset_view;
143}
144
48long arch_ptrace(struct task_struct *child, long request, 145long arch_ptrace(struct task_struct *child, long request,
49 unsigned long addr, unsigned long data) 146 unsigned long addr, unsigned long data)
50{ 147{
@@ -53,14 +150,13 @@ long arch_ptrace(struct task_struct *child, long request,
53 long ret = -EIO; 150 long ret = -EIO;
54 char *childreg; 151 char *childreg;
55 struct pt_regs copyregs; 152 struct pt_regs copyregs;
56 int ex1_offset;
57 153
58 switch (request) { 154 switch (request) {
59 155
60 case PTRACE_PEEKUSR: /* Read register from pt_regs. */ 156 case PTRACE_PEEKUSR: /* Read register from pt_regs. */
61 if (addr >= PTREGS_SIZE) 157 if (addr >= PTREGS_SIZE)
62 break; 158 break;
63 childreg = (char *)task_pt_regs(child) + addr; 159 childreg = getregs(child, &copyregs) + addr;
64#ifdef CONFIG_COMPAT 160#ifdef CONFIG_COMPAT
65 if (is_compat_task()) { 161 if (is_compat_task()) {
66 if (addr & (sizeof(compat_long_t)-1)) 162 if (addr & (sizeof(compat_long_t)-1))
@@ -79,17 +175,7 @@ long arch_ptrace(struct task_struct *child, long request,
79 case PTRACE_POKEUSR: /* Write register in pt_regs. */ 175 case PTRACE_POKEUSR: /* Write register in pt_regs. */
80 if (addr >= PTREGS_SIZE) 176 if (addr >= PTREGS_SIZE)
81 break; 177 break;
82 childreg = (char *)task_pt_regs(child) + addr; 178 childreg = getregs(child, &copyregs) + addr;
83
84 /* Guard against overwrites of the privilege level. */
85 ex1_offset = PTREGS_OFFSET_EX1;
86#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
87 if (is_compat_task()) /* point at low word */
88 ex1_offset += sizeof(compat_long_t);
89#endif
90 if (addr == ex1_offset)
91 data = PL_ICS_EX1(USER_PL, EX1_ICS(data));
92
93#ifdef CONFIG_COMPAT 179#ifdef CONFIG_COMPAT
94 if (is_compat_task()) { 180 if (is_compat_task()) {
95 if (addr & (sizeof(compat_long_t)-1)) 181 if (addr & (sizeof(compat_long_t)-1))
@@ -102,24 +188,20 @@ long arch_ptrace(struct task_struct *child, long request,
102 break; 188 break;
103 *(long *)childreg = data; 189 *(long *)childreg = data;
104 } 190 }
191 putregs(child, &copyregs);
105 ret = 0; 192 ret = 0;
106 break; 193 break;
107 194
108 case PTRACE_GETREGS: /* Get all registers from the child. */ 195 case PTRACE_GETREGS: /* Get all registers from the child. */
109 if (copy_to_user(datap, task_pt_regs(child), 196 ret = copy_regset_to_user(child, &tile_user_regset_view,
110 sizeof(struct pt_regs)) == 0) { 197 REGSET_GPR, 0,
111 ret = 0; 198 sizeof(struct pt_regs), datap);
112 }
113 break; 199 break;
114 200
115 case PTRACE_SETREGS: /* Set all registers in the child. */ 201 case PTRACE_SETREGS: /* Set all registers in the child. */
116 if (copy_from_user(&copyregs, datap, 202 ret = copy_regset_from_user(child, &tile_user_regset_view,
117 sizeof(struct pt_regs)) == 0) { 203 REGSET_GPR, 0,
118 copyregs.ex1 = 204 sizeof(struct pt_regs), datap);
119 PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
120 *task_pt_regs(child) = copyregs;
121 ret = 0;
122 }
123 break; 205 break;
124 206
125 case PTRACE_GETFPREGS: /* Get the child FPU state. */ 207 case PTRACE_GETFPREGS: /* Get the child FPU state. */
@@ -128,12 +210,16 @@ long arch_ptrace(struct task_struct *child, long request,
128 210
129 case PTRACE_SETOPTIONS: 211 case PTRACE_SETOPTIONS:
130 /* Support TILE-specific ptrace options. */ 212 /* Support TILE-specific ptrace options. */
131 child->ptrace &= ~PT_TRACE_MASK_TILE; 213 BUILD_BUG_ON(PTRACE_O_MASK_TILE & PTRACE_O_MASK);
132 tmp = data & PTRACE_O_MASK_TILE; 214 tmp = data & PTRACE_O_MASK_TILE;
133 data &= ~PTRACE_O_MASK_TILE; 215 data &= ~PTRACE_O_MASK_TILE;
134 ret = ptrace_request(child, request, addr, data); 216 ret = ptrace_request(child, request, addr, data);
135 if (tmp & PTRACE_O_TRACEMIGRATE) 217 if (ret == 0) {
136 child->ptrace |= PT_TRACE_MIGRATE; 218 unsigned int flags = child->ptrace;
219 flags &= ~(PTRACE_O_MASK_TILE << PT_OPT_FLAG_SHIFT);
220 flags |= (tmp << PT_OPT_FLAG_SHIFT);
221 child->ptrace = flags;
222 }
137 break; 223 break;
138 224
139 default: 225 default:
diff --git a/arch/unicore32/kernel/module.c b/arch/unicore32/kernel/module.c
index 8fbe8577f5e6..16bd1495b934 100644
--- a/arch/unicore32/kernel/module.c
+++ b/arch/unicore32/kernel/module.c
@@ -27,9 +27,6 @@ void *module_alloc(unsigned long size)
27 struct vm_struct *area; 27 struct vm_struct *area;
28 28
29 size = PAGE_ALIGN(size); 29 size = PAGE_ALIGN(size);
30 if (!size)
31 return NULL;
32
33 area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END); 30 area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
34 if (!area) 31 if (!area)
35 return NULL; 32 return NULL;
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index fbd895562292..3286a92e662a 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -26,11 +26,6 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
26#ifdef CONFIG_X86_32 26#ifdef CONFIG_X86_32
27static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) 27static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
28{ 28{
29 /*
30 * We use exception 16 if we have hardware math and we've either seen
31 * it or the CPU claims it is internal
32 */
33 int fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu);
34 seq_printf(m, 29 seq_printf(m,
35 "fdiv_bug\t: %s\n" 30 "fdiv_bug\t: %s\n"
36 "hlt_bug\t\t: %s\n" 31 "hlt_bug\t\t: %s\n"
@@ -45,7 +40,7 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
45 c->f00f_bug ? "yes" : "no", 40 c->f00f_bug ? "yes" : "no",
46 c->coma_bug ? "yes" : "no", 41 c->coma_bug ? "yes" : "no",
47 c->hard_math ? "yes" : "no", 42 c->hard_math ? "yes" : "no",
48 fpu_exception ? "yes" : "no", 43 c->hard_math ? "yes" : "no",
49 c->cpuid_level, 44 c->cpuid_level,
50 c->wp_works_ok ? "yes" : "no"); 45 c->wp_works_ok ? "yes" : "no");
51} 46}
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 6e03b0d69138..7dc4e459c2b3 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -42,39 +42,6 @@
42 * (these are usually mapped into the 0x30-0xff vector range) 42 * (these are usually mapped into the 0x30-0xff vector range)
43 */ 43 */
44 44
45#ifdef CONFIG_X86_32
46/*
47 * Note that on a 486, we don't want to do a SIGFPE on an irq13
48 * as the irq is unreliable, and exception 16 works correctly
49 * (ie as explained in the intel literature). On a 386, you
50 * can't use exception 16 due to bad IBM design, so we have to
51 * rely on the less exact irq13.
52 *
53 * Careful.. Not only is IRQ13 unreliable, but it is also
54 * leads to races. IBM designers who came up with it should
55 * be shot.
56 */
57
58static irqreturn_t math_error_irq(int cpl, void *dev_id)
59{
60 outb(0, 0xF0);
61 if (ignore_fpu_irq || !boot_cpu_data.hard_math)
62 return IRQ_NONE;
63 math_error(get_irq_regs(), 0, X86_TRAP_MF);
64 return IRQ_HANDLED;
65}
66
67/*
68 * New motherboards sometimes make IRQ 13 be a PCI interrupt,
69 * so allow interrupt sharing.
70 */
71static struct irqaction fpu_irq = {
72 .handler = math_error_irq,
73 .name = "fpu",
74 .flags = IRQF_NO_THREAD,
75};
76#endif
77
78/* 45/*
79 * IRQ2 is cascade interrupt to second interrupt controller 46 * IRQ2 is cascade interrupt to second interrupt controller
80 */ 47 */
@@ -242,13 +209,6 @@ void __init native_init_IRQ(void)
242 setup_irq(2, &irq2); 209 setup_irq(2, &irq2);
243 210
244#ifdef CONFIG_X86_32 211#ifdef CONFIG_X86_32
245 /*
246 * External FPU? Set up irq13 if so, for
247 * original braindamaged IBM FERR coupling.
248 */
249 if (boot_cpu_data.hard_math && !cpu_has_fpu)
250 setup_irq(FPU_IRQ, &fpu_irq);
251
252 irq_ctx_init(smp_processor_id()); 212 irq_ctx_init(smp_processor_id());
253#endif 213#endif
254} 214}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index eb8586693e0b..ecffca11f4e9 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -69,9 +69,6 @@
69 69
70asmlinkage int system_call(void); 70asmlinkage int system_call(void);
71 71
72/* Do we ignore FPU interrupts ? */
73char ignore_fpu_irq;
74
75/* 72/*
76 * The IDT has to be page-aligned to simplify the Pentium 73 * The IDT has to be page-aligned to simplify the Pentium
77 * F0 0F bug workaround. 74 * F0 0F bug workaround.
@@ -564,9 +561,6 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
564 561
565dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) 562dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
566{ 563{
567#ifdef CONFIG_X86_32
568 ignore_fpu_irq = 1;
569#endif
570 exception_enter(regs); 564 exception_enter(regs);
571 math_error(regs, error_code, X86_TRAP_MF); 565 math_error(regs, error_code, X86_TRAP_MF);
572 exception_exit(regs); 566 exception_exit(regs);
diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c
index 5917eb56b313..e6cb80f620af 100644
--- a/arch/x86/platform/iris/iris.c
+++ b/arch/x86/platform/iris/iris.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/moduleparam.h> 24#include <linux/moduleparam.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/platform_device.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27#include <linux/errno.h> 28#include <linux/errno.h>
28#include <linux/delay.h> 29#include <linux/delay.h>
@@ -62,29 +63,75 @@ static void iris_power_off(void)
62 * by reading its input port and seeing whether the read value is 63 * by reading its input port and seeing whether the read value is
63 * meaningful. 64 * meaningful.
64 */ 65 */
65static int iris_init(void) 66static int iris_probe(struct platform_device *pdev)
66{ 67{
67 unsigned char status; 68 unsigned char status = inb(IRIS_GIO_INPUT);
68 if (force != 1) {
69 printk(KERN_ERR "The force parameter has not been set to 1 so the Iris poweroff handler will not be installed.\n");
70 return -ENODEV;
71 }
72 status = inb(IRIS_GIO_INPUT);
73 if (status == IRIS_GIO_NODEV) { 69 if (status == IRIS_GIO_NODEV) {
74 printk(KERN_ERR "This machine does not seem to be an Iris. Power_off handler not installed.\n"); 70 printk(KERN_ERR "This machine does not seem to be an Iris. "
71 "Power off handler not installed.\n");
75 return -ENODEV; 72 return -ENODEV;
76 } 73 }
77 old_pm_power_off = pm_power_off; 74 old_pm_power_off = pm_power_off;
78 pm_power_off = &iris_power_off; 75 pm_power_off = &iris_power_off;
79 printk(KERN_INFO "Iris power_off handler installed.\n"); 76 printk(KERN_INFO "Iris power_off handler installed.\n");
80
81 return 0; 77 return 0;
82} 78}
83 79
84static void iris_exit(void) 80static int iris_remove(struct platform_device *pdev)
85{ 81{
86 pm_power_off = old_pm_power_off; 82 pm_power_off = old_pm_power_off;
87 printk(KERN_INFO "Iris power_off handler uninstalled.\n"); 83 printk(KERN_INFO "Iris power_off handler uninstalled.\n");
84 return 0;
85}
86
87static struct platform_driver iris_driver = {
88 .driver = {
89 .name = "iris",
90 .owner = THIS_MODULE,
91 },
92 .probe = iris_probe,
93 .remove = iris_remove,
94};
95
96static struct resource iris_resources[] = {
97 {
98 .start = IRIS_GIO_BASE,
99 .end = IRIS_GIO_OUTPUT,
100 .flags = IORESOURCE_IO,
101 .name = "address"
102 }
103};
104
105static struct platform_device *iris_device;
106
107static int iris_init(void)
108{
109 int ret;
110 if (force != 1) {
111 printk(KERN_ERR "The force parameter has not been set to 1."
112 " The Iris poweroff handler will not be installed.\n");
113 return -ENODEV;
114 }
115 ret = platform_driver_register(&iris_driver);
116 if (ret < 0) {
117 printk(KERN_ERR "Failed to register iris platform driver: %d\n",
118 ret);
119 return ret;
120 }
121 iris_device = platform_device_register_simple("iris", (-1),
122 iris_resources, ARRAY_SIZE(iris_resources));
123 if (IS_ERR(iris_device)) {
124 printk(KERN_ERR "Failed to register iris platform device\n");
125 platform_driver_unregister(&iris_driver);
126 return PTR_ERR(iris_device);
127 }
128 return 0;
129}
130
131static void iris_exit(void)
132{
133 platform_device_unregister(iris_device);
134 platform_driver_unregister(&iris_driver);
88} 135}
89 136
90module_init(iris_init); 137module_init(iris_init);
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index ee3c220ee500..05f404f53f59 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -356,3 +356,4 @@
356347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv 356347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
357348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev 357348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
358349 i386 kcmp sys_kcmp 358349 i386 kcmp sys_kcmp
359350 i386 finit_module sys_finit_module
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index a582bfed95bb..7c58c84b7bc8 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -319,6 +319,7 @@
319310 64 process_vm_readv sys_process_vm_readv 319310 64 process_vm_readv sys_process_vm_readv
320311 64 process_vm_writev sys_process_vm_writev 320311 64 process_vm_writev sys_process_vm_writev
321312 common kcmp sys_kcmp 321312 common kcmp sys_kcmp
322313 common finit_module sys_finit_module
322 323
323# 324#
324# x32-specific system call numbers start at 512 to avoid cache impact 325# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 2481f267be29..73d34e77c39c 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -17,6 +17,7 @@ config XTENSA
17 select GENERIC_KERNEL_EXECVE 17 select GENERIC_KERNEL_EXECVE
18 select ARCH_WANT_OPTIONAL_GPIOLIB 18 select ARCH_WANT_OPTIONAL_GPIOLIB
19 select CLONE_BACKWARDS 19 select CLONE_BACKWARDS
20 select IRQ_DOMAIN
20 help 21 help
21 Xtensa processors are 32-bit RISC machines designed by Tensilica 22 Xtensa processors are 32-bit RISC machines designed by Tensilica
22 primarily for embedded systems. These processors are both 23 primarily for embedded systems. These processors are both
@@ -150,6 +151,15 @@ config XTENSA_PLATFORM_S6105
150 select SERIAL_CONSOLE 151 select SERIAL_CONSOLE
151 select NO_IOPORT 152 select NO_IOPORT
152 153
154config XTENSA_PLATFORM_XTFPGA
155 bool "XTFPGA"
156 select SERIAL_CONSOLE
157 select ETHOC
158 select XTENSA_CALIBRATE_CCOUNT
159 help
160 XTFPGA is the name of Tensilica board family (LX60, LX110, LX200, ML605).
161 This hardware is capable of running a full Linux distribution.
162
153endchoice 163endchoice
154 164
155 165
@@ -177,6 +187,17 @@ config CMDLINE
177 time by entering them here. As a minimum, you should specify the 187 time by entering them here. As a minimum, you should specify the
178 memory size and the root device (e.g., mem=64M root=/dev/nfs). 188 memory size and the root device (e.g., mem=64M root=/dev/nfs).
179 189
190config USE_OF
191 bool "Flattened Device Tree support"
192 select OF
193 select OF_EARLY_FLATTREE
194 help
195 Include support for flattened device tree machine descriptions.
196
197config BUILTIN_DTB
198 string "DTB to build into the kernel image"
199 depends on OF
200
180source "mm/Kconfig" 201source "mm/Kconfig"
181 202
182source "drivers/pcmcia/Kconfig" 203source "drivers/pcmcia/Kconfig"
diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
index 11c585295dd7..a34010e0e51c 100644
--- a/arch/xtensa/Kconfig.debug
+++ b/arch/xtensa/Kconfig.debug
@@ -2,6 +2,26 @@ menu "Kernel hacking"
2 2
3source "lib/Kconfig.debug" 3source "lib/Kconfig.debug"
4 4
5endmenu 5config LD_NO_RELAX
6 bool "Disable linker relaxation"
7 default n
8 help
9 Enable this function to disable link-time optimizations.
10 The default linker behavior is to combine identical literal
11 values to reduce code size and remove unnecessary overhead from
12 assembler-generated 'longcall' sequences.
13 Enabling this option improves the link time but increases the
14 code size, and possibly execution time.
15
16config S32C1I_SELFTEST
17 bool "Perform S32C1I instruction self-test at boot"
18 default y
19 help
20 Enable this option to test S32C1I instruction behavior at boot.
21 Correct operation of this instruction requires some cooperation from hardware
22 external to the processor (such as bus bridge, bus fabric, or memory controller).
23 It is easy to make wrong hardware configuration, this test should catch it early.
6 24
25 Say 'N' on stable hardware.
7 26
27endmenu
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index bb5ba61723f7..0aa72702f179 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -38,6 +38,7 @@ endif
38platform-$(CONFIG_XTENSA_PLATFORM_XT2000) := xt2000 38platform-$(CONFIG_XTENSA_PLATFORM_XT2000) := xt2000
39platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss 39platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
40platform-$(CONFIG_XTENSA_PLATFORM_S6105) := s6105 40platform-$(CONFIG_XTENSA_PLATFORM_S6105) := s6105
41platform-$(CONFIG_XTENSA_PLATFORM_XTFPGA) := xtfpga
41 42
42PLATFORM = $(platform-y) 43PLATFORM = $(platform-y)
43export PLATFORM 44export PLATFORM
@@ -49,6 +50,17 @@ KBUILD_CFLAGS += -pipe -mlongcalls
49 50
50KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,) 51KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,)
51 52
53ifneq ($(CONFIG_LD_NO_RELAX),)
54LDFLAGS := --no-relax
55endif
56
57ifeq ($(shell echo -e __XTENSA_EB__ | $(CC) -E - | grep -v "\#"),1)
58CHECKFLAGS += -D__XTENSA_EB__
59endif
60ifeq ($(shell echo -e __XTENSA_EL__ | $(CC) -E - | grep -v "\#"),1)
61CHECKFLAGS += -D__XTENSA_EL__
62endif
63
52vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y)) 64vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
53plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y)) 65plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
54 66
@@ -75,6 +87,10 @@ core-y += $(buildvar) $(buildplf)
75 87
76libs-y += arch/xtensa/lib/ $(LIBGCC) 88libs-y += arch/xtensa/lib/ $(LIBGCC)
77 89
90ifneq ($(CONFIG_BUILTIN_DTB),"")
91core-$(CONFIG_OF) += arch/xtensa/boot/
92endif
93
78boot := arch/xtensa/boot 94boot := arch/xtensa/boot
79 95
80all: zImage 96all: zImage
@@ -84,7 +100,9 @@ bzImage : zImage
84zImage: vmlinux 100zImage: vmlinux
85 $(Q)$(MAKE) $(build)=$(boot) $@ 101 $(Q)$(MAKE) $(build)=$(boot) $@
86 102
103%.dtb:
104 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
105
87define archhelp 106define archhelp
88 @echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)' 107 @echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
89endef 108endef
90
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index 4018f8994196..818647e815d7 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -22,12 +22,35 @@ subdir-y := lib
22# Subdirs for the boot loader(s) 22# Subdirs for the boot loader(s)
23 23
24bootdir-$(CONFIG_XTENSA_PLATFORM_ISS) += boot-elf 24bootdir-$(CONFIG_XTENSA_PLATFORM_ISS) += boot-elf
25bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf 25bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf boot-uboot
26bootdir-$(CONFIG_XTENSA_PLATFORM_XTFPGA) += boot-redboot boot-elf boot-uboot
26 27
27 28
29BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
30ifneq ($(CONFIG_BUILTIN_DTB),"")
31obj-$(CONFIG_OF) += $(BUILTIN_DTB)
32endif
33
34# Rule to build device tree blobs
35$(obj)/%.dtb: $(src)/dts/%.dts FORCE
36 $(call if_changed_dep,dtc)
37
38clean-files := *.dtb.S
39
28zImage Image: $(bootdir-y) 40zImage Image: $(bootdir-y)
29 41
30$(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \ 42$(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \
31 $(addprefix $(obj)/,$(host-progs)) 43 $(addprefix $(obj)/,$(host-progs))
32 $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS) 44 $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
33 45
46OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary
47
48vmlinux.bin: vmlinux FORCE
49 $(call if_changed,objcopy)
50
51vmlinux.bin.gz: vmlinux.bin FORCE
52 $(call if_changed,gzip)
53
54boot-elf: vmlinux.bin
55boot-redboot: vmlinux.bin.gz
56boot-uboot: vmlinux.bin.gz
diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile
index f10992b89027..1fe01b78c124 100644
--- a/arch/xtensa/boot/boot-elf/Makefile
+++ b/arch/xtensa/boot/boot-elf/Makefile
@@ -4,9 +4,6 @@
4# for more details. 4# for more details.
5# 5#
6 6
7GZIP = gzip
8GZIP_FLAGS = -v9fc
9
10ifeq ($(BIG_ENDIAN),1) 7ifeq ($(BIG_ENDIAN),1)
11OBJCOPY_ARGS := -O elf32-xtensa-be 8OBJCOPY_ARGS := -O elf32-xtensa-be
12else 9else
@@ -20,18 +17,17 @@ boot-y := bootstrap.o
20 17
21OBJS := $(addprefix $(obj)/,$(boot-y)) 18OBJS := $(addprefix $(obj)/,$(boot-y))
22 19
23vmlinux.tmp: vmlinux 20$(obj)/Image.o: vmlinux.bin $(OBJS)
24 $(OBJCOPY) --strip-all -R .comment -R .note.gnu.build-id -O binary \ 21 $(Q)$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
25 $^ $@ 22 --add-section image=vmlinux.bin \
26
27Image: vmlinux.tmp $(OBJS) arch/$(ARCH)/boot/boot-elf/boot.lds
28 $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
29 --add-section image=vmlinux.tmp \
30 --set-section-flags image=contents,alloc,load,load,data \ 23 --set-section-flags image=contents,alloc,load,load,data \
31 $(OBJS) $@.tmp 24 $(OBJS) $@
32 $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
33 -T arch/$(ARCH)/boot/boot-elf/boot.lds \
34 -o arch/$(ARCH)/boot/$@.elf $@.tmp
35 25
36zImage: Image 26$(obj)/../Image.elf: $(obj)/Image.o $(obj)/boot.lds
27 $(Q)$(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
28 -T $(obj)/boot.lds \
29 --build-id=none \
30 -o $@ $(obj)/Image.o
31 $(Q)$(kecho) ' Kernel: $@ is ready'
37 32
33zImage: $(obj)/../Image.elf
diff --git a/arch/xtensa/boot/boot-redboot/Makefile b/arch/xtensa/boot/boot-redboot/Makefile
index 25a78c6b1530..8be8b9436981 100644
--- a/arch/xtensa/boot/boot-redboot/Makefile
+++ b/arch/xtensa/boot/boot-redboot/Makefile
@@ -4,8 +4,6 @@
4# for more details. 4# for more details.
5# 5#
6 6
7GZIP = gzip
8GZIP_FLAGS = -v9fc
9ifeq ($(BIG_ENDIAN),1) 7ifeq ($(BIG_ENDIAN),1)
10OBJCOPY_ARGS := -O elf32-xtensa-be 8OBJCOPY_ARGS := -O elf32-xtensa-be
11else 9else
@@ -21,17 +19,17 @@ LIBS := arch/xtensa/boot/lib/lib.a arch/xtensa/lib/lib.a
21 19
22LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) 20LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
23 21
24vmlinux.tmp: vmlinux 22$(obj)/zImage.o: vmlinux.bin.gz $(OBJS)
25 $(OBJCOPY) --strip-all -R .comment -R .note.gnu.build-id -O binary \ 23 $(Q)$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
26 $^ $@ 24 --add-section image=vmlinux.bin.gz \
25 --set-section-flags image=contents,alloc,load,load,data \
26 $(OBJS) $@
27 27
28vmlinux.tmp.gz: vmlinux.tmp 28$(obj)/zImage.elf: $(obj)/zImage.o $(LIBS)
29 $(GZIP) $(GZIP_FLAGS) $^ > $@ 29 $(Q)$(LD) $(LD_ARGS) -o $@ $^ -L/xtensa-elf/lib $(LIBGCC)
30 30
31zImage: vmlinux.tmp.gz $(OBJS) $(LIBS) 31$(obj)/../zImage.redboot: $(obj)/zImage.elf
32 $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \ 32 $(Q)$(OBJCOPY) -S -O binary $< $@
33 --add-section image=vmlinux.tmp.gz \ 33 $(Q)$(kecho) ' Kernel: $@ is ready'
34 --set-section-flags image=contents,alloc,load,load,data \ 34
35 $(OBJS) $@.tmp 35zImage: $(obj)/../zImage.redboot
36 $(LD) $(LD_ARGS) -o $@.elf $@.tmp $(LIBS) -L/xtensa-elf/lib $(LIBGCC)
37 $(OBJCOPY) -S -O binary $@.elf arch/$(ARCH)/boot/$@.redboot
diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile
new file mode 100644
index 000000000000..bfbf8af582f1
--- /dev/null
+++ b/arch/xtensa/boot/boot-uboot/Makefile
@@ -0,0 +1,14 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6
7UIMAGE_LOADADDR = 0xd0001000
8UIMAGE_COMPRESSION = gzip
9
10$(obj)/../uImage: vmlinux.bin.gz FORCE
11 $(call if_changed,uimage)
12 $(Q)$(kecho) ' Kernel: $@ is ready'
13
14zImage: $(obj)/../uImage
diff --git a/arch/xtensa/boot/dts/lx60.dts b/arch/xtensa/boot/dts/lx60.dts
new file mode 100644
index 000000000000..2eab3658e1bd
--- /dev/null
+++ b/arch/xtensa/boot/dts/lx60.dts
@@ -0,0 +1,11 @@
1/dts-v1/;
2/include/ "xtfpga.dtsi"
3/include/ "xtfpga-flash-4m.dtsi"
4
5/ {
6 compatible = "xtensa,lx60";
7 memory@0 {
8 device_type = "memory";
9 reg = <0x00000000 0x04000000>;
10 };
11};
diff --git a/arch/xtensa/boot/dts/ml605.dts b/arch/xtensa/boot/dts/ml605.dts
new file mode 100644
index 000000000000..6ed51d6554e6
--- /dev/null
+++ b/arch/xtensa/boot/dts/ml605.dts
@@ -0,0 +1,11 @@
1/dts-v1/;
2/include/ "xtfpga.dtsi"
3/include/ "xtfpga-flash-16m.dtsi"
4
5/ {
6 compatible = "xtensa,ml605";
7 memory@0 {
8 device_type = "memory";
9 reg = <0x00000000 0x08000000>;
10 };
11};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
new file mode 100644
index 000000000000..e5703c7beeb6
--- /dev/null
+++ b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
@@ -0,0 +1,26 @@
1/ {
2 flash: flash@f8000000 {
3 #address-cells = <1>;
4 #size-cells = <1>;
5 compatible = "cfi-flash";
6 reg = <0xf8000000 0x01000000>;
7 bank-width = <2>;
8 device-width = <2>;
9 partition@0x0 {
10 label = "boot loader area";
11 reg = <0x00000000 0x00400000>;
12 };
13 partition@0x400000 {
14 label = "kernel image";
15 reg = <0x00400000 0x00600000>;
16 };
17 partition@0xa00000 {
18 label = "data";
19 reg = <0x00a00000 0x005e0000>;
20 };
21 partition@0xfe0000 {
22 label = "boot environment";
23 reg = <0x00fe0000 0x00020000>;
24 };
25 };
26};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
new file mode 100644
index 000000000000..6f9c10d6b689
--- /dev/null
+++ b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
@@ -0,0 +1,18 @@
1/ {
2 flash: flash@f8000000 {
3 #address-cells = <1>;
4 #size-cells = <1>;
5 compatible = "cfi-flash";
6 reg = <0xf8000000 0x00400000>;
7 bank-width = <2>;
8 device-width = <2>;
9 partition@0x0 {
10 label = "boot loader area";
11 reg = <0x00000000 0x003f0000>;
12 };
13 partition@0x3f0000 {
14 label = "boot environment";
15 reg = <0x003f0000 0x00010000>;
16 };
17 };
18};
diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi
new file mode 100644
index 000000000000..7eda6ecf7eef
--- /dev/null
+++ b/arch/xtensa/boot/dts/xtfpga.dtsi
@@ -0,0 +1,56 @@
1/ {
2 compatible = "xtensa,xtfpga";
3 #address-cells = <1>;
4 #size-cells = <1>;
5 interrupt-parent = <&pic>;
6
7 chosen {
8 bootargs = "earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug";
9 };
10
11 memory@0 {
12 device_type = "memory";
13 reg = <0x00000000 0x06000000>;
14 };
15
16 cpus {
17 #address-cells = <1>;
18 #size-cells = <0>;
19 cpu@0 {
20 compatible = "xtensa,cpu";
21 reg = <0>;
22 /* Filled in by platform_setup from FPGA register
23 * clock-frequency = <100000000>;
24 */
25 };
26 };
27
28 pic: pic {
29 compatible = "xtensa,pic";
30 /* one cell: internal irq number,
31 * two cells: second cell == 0: internal irq number
32 * second cell == 1: external irq number
33 */
34 #interrupt-cells = <2>;
35 interrupt-controller;
36 };
37
38 serial0: serial@fd050020 {
39 device_type = "serial";
40 compatible = "ns16550a";
41 no-loopback-test;
42 reg = <0xfd050020 0x20>;
43 reg-shift = <2>;
44 interrupts = <0 1>; /* external irq 0 */
45 /* Filled in by platform_setup from FPGA register
46 * clock-frequency = <100000000>;
47 */
48 };
49
50 enet0: ethoc@fd030000 {
51 compatible = "opencores,ethoc";
52 reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
53 interrupts = <1 1>; /* external irq 1 */
54 local-mac-address = [00 50 c2 13 6f 00];
55 };
56};
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 24f50cada70c..c3f289174c10 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -66,19 +66,35 @@
66 */ 66 */
67static inline void atomic_add(int i, atomic_t * v) 67static inline void atomic_add(int i, atomic_t * v)
68{ 68{
69 unsigned int vval; 69#if XCHAL_HAVE_S32C1I
70 70 unsigned long tmp;
71 __asm__ __volatile__( 71 int result;
72 "rsil a15, "__stringify(LOCKLEVEL)"\n\t" 72
73 "l32i %0, %2, 0 \n\t" 73 __asm__ __volatile__(
74 "add %0, %0, %1 \n\t" 74 "1: l32i %1, %3, 0\n"
75 "s32i %0, %2, 0 \n\t" 75 " wsr %1, scompare1\n"
76 "wsr a15, ps \n\t" 76 " add %0, %1, %2\n"
77 "rsync \n" 77 " s32c1i %0, %3, 0\n"
78 : "=&a" (vval) 78 " bne %0, %1, 1b\n"
79 : "a" (i), "a" (v) 79 : "=&a" (result), "=&a" (tmp)
80 : "a15", "memory" 80 : "a" (i), "a" (v)
81 ); 81 : "memory"
82 );
83#else
84 unsigned int vval;
85
86 __asm__ __volatile__(
87 " rsil a15, "__stringify(LOCKLEVEL)"\n"
88 " l32i %0, %2, 0\n"
89 " add %0, %0, %1\n"
90 " s32i %0, %2, 0\n"
91 " wsr a15, ps\n"
92 " rsync\n"
93 : "=&a" (vval)
94 : "a" (i), "a" (v)
95 : "a15", "memory"
96 );
97#endif
82} 98}
83 99
84/** 100/**
@@ -90,19 +106,35 @@ static inline void atomic_add(int i, atomic_t * v)
90 */ 106 */
91static inline void atomic_sub(int i, atomic_t *v) 107static inline void atomic_sub(int i, atomic_t *v)
92{ 108{
93 unsigned int vval; 109#if XCHAL_HAVE_S32C1I
94 110 unsigned long tmp;
95 __asm__ __volatile__( 111 int result;
96 "rsil a15, "__stringify(LOCKLEVEL)"\n\t" 112
97 "l32i %0, %2, 0 \n\t" 113 __asm__ __volatile__(
98 "sub %0, %0, %1 \n\t" 114 "1: l32i %1, %3, 0\n"
99 "s32i %0, %2, 0 \n\t" 115 " wsr %1, scompare1\n"
100 "wsr a15, ps \n\t" 116 " sub %0, %1, %2\n"
101 "rsync \n" 117 " s32c1i %0, %3, 0\n"
102 : "=&a" (vval) 118 " bne %0, %1, 1b\n"
103 : "a" (i), "a" (v) 119 : "=&a" (result), "=&a" (tmp)
104 : "a15", "memory" 120 : "a" (i), "a" (v)
105 ); 121 : "memory"
122 );
123#else
124 unsigned int vval;
125
126 __asm__ __volatile__(
127 " rsil a15, "__stringify(LOCKLEVEL)"\n"
128 " l32i %0, %2, 0\n"
129 " sub %0, %0, %1\n"
130 " s32i %0, %2, 0\n"
131 " wsr a15, ps\n"
132 " rsync\n"
133 : "=&a" (vval)
134 : "a" (i), "a" (v)
135 : "a15", "memory"
136 );
137#endif
106} 138}
107 139
108/* 140/*
@@ -111,40 +143,78 @@ static inline void atomic_sub(int i, atomic_t *v)
111 143
112static inline int atomic_add_return(int i, atomic_t * v) 144static inline int atomic_add_return(int i, atomic_t * v)
113{ 145{
114 unsigned int vval; 146#if XCHAL_HAVE_S32C1I
115 147 unsigned long tmp;
116 __asm__ __volatile__( 148 int result;
117 "rsil a15,"__stringify(LOCKLEVEL)"\n\t" 149
118 "l32i %0, %2, 0 \n\t" 150 __asm__ __volatile__(
119 "add %0, %0, %1 \n\t" 151 "1: l32i %1, %3, 0\n"
120 "s32i %0, %2, 0 \n\t" 152 " wsr %1, scompare1\n"
121 "wsr a15, ps \n\t" 153 " add %0, %1, %2\n"
122 "rsync \n" 154 " s32c1i %0, %3, 0\n"
123 : "=&a" (vval) 155 " bne %0, %1, 1b\n"
124 : "a" (i), "a" (v) 156 " add %0, %0, %2\n"
125 : "a15", "memory" 157 : "=&a" (result), "=&a" (tmp)
126 ); 158 : "a" (i), "a" (v)
127 159 : "memory"
128 return vval; 160 );
161
162 return result;
163#else
164 unsigned int vval;
165
166 __asm__ __volatile__(
167 " rsil a15,"__stringify(LOCKLEVEL)"\n"
168 " l32i %0, %2, 0\n"
169 " add %0, %0, %1\n"
170 " s32i %0, %2, 0\n"
171 " wsr a15, ps\n"
172 " rsync\n"
173 : "=&a" (vval)
174 : "a" (i), "a" (v)
175 : "a15", "memory"
176 );
177
178 return vval;
179#endif
129} 180}
130 181
131static inline int atomic_sub_return(int i, atomic_t * v) 182static inline int atomic_sub_return(int i, atomic_t * v)
132{ 183{
133 unsigned int vval; 184#if XCHAL_HAVE_S32C1I
134 185 unsigned long tmp;
135 __asm__ __volatile__( 186 int result;
136 "rsil a15,"__stringify(LOCKLEVEL)"\n\t" 187
137 "l32i %0, %2, 0 \n\t" 188 __asm__ __volatile__(
138 "sub %0, %0, %1 \n\t" 189 "1: l32i %1, %3, 0\n"
139 "s32i %0, %2, 0 \n\t" 190 " wsr %1, scompare1\n"
140 "wsr a15, ps \n\t" 191 " sub %0, %1, %2\n"
141 "rsync \n" 192 " s32c1i %0, %3, 0\n"
142 : "=&a" (vval) 193 " bne %0, %1, 1b\n"
143 : "a" (i), "a" (v) 194 " sub %0, %0, %2\n"
144 : "a15", "memory" 195 : "=&a" (result), "=&a" (tmp)
145 ); 196 : "a" (i), "a" (v)
146 197 : "memory"
147 return vval; 198 );
199
200 return result;
201#else
202 unsigned int vval;
203
204 __asm__ __volatile__(
205 " rsil a15,"__stringify(LOCKLEVEL)"\n"
206 " l32i %0, %2, 0\n"
207 " sub %0, %0, %1\n"
208 " s32i %0, %2, 0\n"
209 " wsr a15, ps\n"
210 " rsync\n"
211 : "=&a" (vval)
212 : "a" (i), "a" (v)
213 : "a15", "memory"
214 );
215
216 return vval;
217#endif
148} 218}
149 219
150/** 220/**
@@ -251,38 +321,70 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
251 321
252static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 322static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
253{ 323{
254 unsigned int all_f = -1; 324#if XCHAL_HAVE_S32C1I
255 unsigned int vval; 325 unsigned long tmp;
256 326 int result;
257 __asm__ __volatile__( 327
258 "rsil a15,"__stringify(LOCKLEVEL)"\n\t" 328 __asm__ __volatile__(
259 "l32i %0, %2, 0 \n\t" 329 "1: l32i %1, %3, 0\n"
260 "xor %1, %4, %3 \n\t" 330 " wsr %1, scompare1\n"
261 "and %0, %0, %4 \n\t" 331 " and %0, %1, %2\n"
262 "s32i %0, %2, 0 \n\t" 332 " s32c1i %0, %3, 0\n"
263 "wsr a15, ps \n\t" 333 " bne %0, %1, 1b\n"
264 "rsync \n" 334 : "=&a" (result), "=&a" (tmp)
265 : "=&a" (vval), "=a" (mask) 335 : "a" (~mask), "a" (v)
266 : "a" (v), "a" (all_f), "1" (mask) 336 : "memory"
267 : "a15", "memory" 337 );
268 ); 338#else
339 unsigned int all_f = -1;
340 unsigned int vval;
341
342 __asm__ __volatile__(
343 " rsil a15,"__stringify(LOCKLEVEL)"\n"
344 " l32i %0, %2, 0\n"
345 " xor %1, %4, %3\n"
346 " and %0, %0, %4\n"
347 " s32i %0, %2, 0\n"
348 " wsr a15, ps\n"
349 " rsync\n"
350 : "=&a" (vval), "=a" (mask)
351 : "a" (v), "a" (all_f), "1" (mask)
352 : "a15", "memory"
353 );
354#endif
269} 355}
270 356
271static inline void atomic_set_mask(unsigned int mask, atomic_t *v) 357static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
272{ 358{
273 unsigned int vval; 359#if XCHAL_HAVE_S32C1I
274 360 unsigned long tmp;
275 __asm__ __volatile__( 361 int result;
276 "rsil a15,"__stringify(LOCKLEVEL)"\n\t" 362
277 "l32i %0, %2, 0 \n\t" 363 __asm__ __volatile__(
278 "or %0, %0, %1 \n\t" 364 "1: l32i %1, %3, 0\n"
279 "s32i %0, %2, 0 \n\t" 365 " wsr %1, scompare1\n"
280 "wsr a15, ps \n\t" 366 " or %0, %1, %2\n"
281 "rsync \n" 367 " s32c1i %0, %3, 0\n"
282 : "=&a" (vval) 368 " bne %0, %1, 1b\n"
283 : "a" (mask), "a" (v) 369 : "=&a" (result), "=&a" (tmp)
284 : "a15", "memory" 370 : "a" (mask), "a" (v)
285 ); 371 : "memory"
372 );
373#else
374 unsigned int vval;
375
376 __asm__ __volatile__(
377 " rsil a15,"__stringify(LOCKLEVEL)"\n"
378 " l32i %0, %2, 0\n"
379 " or %0, %0, %1\n"
380 " s32i %0, %2, 0\n"
381 " wsr a15, ps\n"
382 " rsync\n"
383 : "=&a" (vval)
384 : "a" (mask), "a" (v)
385 : "a15", "memory"
386 );
387#endif
286} 388}
287 389
288/* Atomic operations are already serializing */ 390/* Atomic operations are already serializing */
@@ -294,4 +396,3 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
294#endif /* __KERNEL__ */ 396#endif /* __KERNEL__ */
295 397
296#endif /* _XTENSA_ATOMIC_H */ 398#endif /* _XTENSA_ATOMIC_H */
297
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index 55707a8009d3..ef021677d536 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2001 - 2005 Tensilica Inc. 6 * Copyright (C) 2001 - 2012 Tensilica Inc.
7 */ 7 */
8 8
9#ifndef _XTENSA_SYSTEM_H 9#ifndef _XTENSA_SYSTEM_H
@@ -12,8 +12,8 @@
12#define smp_read_barrier_depends() do { } while(0) 12#define smp_read_barrier_depends() do { } while(0)
13#define read_barrier_depends() do { } while(0) 13#define read_barrier_depends() do { } while(0)
14 14
15#define mb() barrier() 15#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
16#define rmb() mb() 16#define rmb() barrier()
17#define wmb() mb() 17#define wmb() mb()
18 18
19#ifdef CONFIG_SMP 19#ifdef CONFIG_SMP
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index 5270197ddd36..84afe58d5d37 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -29,7 +29,6 @@
29#define smp_mb__before_clear_bit() barrier() 29#define smp_mb__before_clear_bit() barrier()
30#define smp_mb__after_clear_bit() barrier() 30#define smp_mb__after_clear_bit() barrier()
31 31
32#include <asm-generic/bitops/atomic.h>
33#include <asm-generic/bitops/non-atomic.h> 32#include <asm-generic/bitops/non-atomic.h>
34 33
35#if XCHAL_HAVE_NSA 34#if XCHAL_HAVE_NSA
@@ -104,6 +103,132 @@ static inline unsigned long __fls(unsigned long word)
104#endif 103#endif
105 104
106#include <asm-generic/bitops/fls64.h> 105#include <asm-generic/bitops/fls64.h>
106
107#if XCHAL_HAVE_S32C1I
108
109static inline void set_bit(unsigned int bit, volatile unsigned long *p)
110{
111 unsigned long tmp, value;
112 unsigned long mask = 1UL << (bit & 31);
113
114 p += bit >> 5;
115
116 __asm__ __volatile__(
117 "1: l32i %1, %3, 0\n"
118 " wsr %1, scompare1\n"
119 " or %0, %1, %2\n"
120 " s32c1i %0, %3, 0\n"
121 " bne %0, %1, 1b\n"
122 : "=&a" (tmp), "=&a" (value)
123 : "a" (mask), "a" (p)
124 : "memory");
125}
126
127static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
128{
129 unsigned long tmp, value;
130 unsigned long mask = 1UL << (bit & 31);
131
132 p += bit >> 5;
133
134 __asm__ __volatile__(
135 "1: l32i %1, %3, 0\n"
136 " wsr %1, scompare1\n"
137 " and %0, %1, %2\n"
138 " s32c1i %0, %3, 0\n"
139 " bne %0, %1, 1b\n"
140 : "=&a" (tmp), "=&a" (value)
141 : "a" (~mask), "a" (p)
142 : "memory");
143}
144
145static inline void change_bit(unsigned int bit, volatile unsigned long *p)
146{
147 unsigned long tmp, value;
148 unsigned long mask = 1UL << (bit & 31);
149
150 p += bit >> 5;
151
152 __asm__ __volatile__(
153 "1: l32i %1, %3, 0\n"
154 " wsr %1, scompare1\n"
155 " xor %0, %1, %2\n"
156 " s32c1i %0, %3, 0\n"
157 " bne %0, %1, 1b\n"
158 : "=&a" (tmp), "=&a" (value)
159 : "a" (mask), "a" (p)
160 : "memory");
161}
162
163static inline int
164test_and_set_bit(unsigned int bit, volatile unsigned long *p)
165{
166 unsigned long tmp, value;
167 unsigned long mask = 1UL << (bit & 31);
168
169 p += bit >> 5;
170
171 __asm__ __volatile__(
172 "1: l32i %1, %3, 0\n"
173 " wsr %1, scompare1\n"
174 " or %0, %1, %2\n"
175 " s32c1i %0, %3, 0\n"
176 " bne %0, %1, 1b\n"
177 : "=&a" (tmp), "=&a" (value)
178 : "a" (mask), "a" (p)
179 : "memory");
180
181 return tmp & mask;
182}
183
184static inline int
185test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
186{
187 unsigned long tmp, value;
188 unsigned long mask = 1UL << (bit & 31);
189
190 p += bit >> 5;
191
192 __asm__ __volatile__(
193 "1: l32i %1, %3, 0\n"
194 " wsr %1, scompare1\n"
195 " and %0, %1, %2\n"
196 " s32c1i %0, %3, 0\n"
197 " bne %0, %1, 1b\n"
198 : "=&a" (tmp), "=&a" (value)
199 : "a" (~mask), "a" (p)
200 : "memory");
201
202 return tmp & mask;
203}
204
205static inline int
206test_and_change_bit(unsigned int bit, volatile unsigned long *p)
207{
208 unsigned long tmp, value;
209 unsigned long mask = 1UL << (bit & 31);
210
211 p += bit >> 5;
212
213 __asm__ __volatile__(
214 "1: l32i %1, %3, 0\n"
215 " wsr %1, scompare1\n"
216 " xor %0, %1, %2\n"
217 " s32c1i %0, %3, 0\n"
218 " bne %0, %1, 1b\n"
219 : "=&a" (tmp), "=&a" (value)
220 : "a" (mask), "a" (p)
221 : "memory");
222
223 return tmp & mask;
224}
225
226#else
227
228#include <asm-generic/bitops/atomic.h>
229
230#endif /* XCHAL_HAVE_S32C1I */
231
107#include <asm-generic/bitops/find.h> 232#include <asm-generic/bitops/find.h>
108#include <asm-generic/bitops/le.h> 233#include <asm-generic/bitops/le.h>
109 234
diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h
index 9983f2c1b7ee..0c25799facab 100644
--- a/arch/xtensa/include/asm/bootparam.h
+++ b/arch/xtensa/include/asm/bootparam.h
@@ -22,6 +22,7 @@
22#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */ 22#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */
23#define BP_TAG_SERIAL_BAUSRATE 0x1004 /* baud rate of current console. */ 23#define BP_TAG_SERIAL_BAUSRATE 0x1004 /* baud rate of current console. */
24#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */ 24#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */
25#define BP_TAG_FDT 0x1006 /* flat device tree addr */
25 26
26#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */ 27#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */
27#define BP_TAG_LAST 0x7E0B /* last tag */ 28#define BP_TAG_LAST 0x7E0B /* last tag */
@@ -31,15 +32,15 @@
31/* All records are aligned to 4 bytes */ 32/* All records are aligned to 4 bytes */
32 33
33typedef struct bp_tag { 34typedef struct bp_tag {
34 unsigned short id; /* tag id */ 35 unsigned short id; /* tag id */
35 unsigned short size; /* size of this record excluding the structure*/ 36 unsigned short size; /* size of this record excluding the structure*/
36 unsigned long data[0]; /* data */ 37 unsigned long data[0]; /* data */
37} bp_tag_t; 38} bp_tag_t;
38 39
39typedef struct meminfo { 40typedef struct meminfo {
40 unsigned long type; 41 unsigned long type;
41 unsigned long start; 42 unsigned long start;
42 unsigned long end; 43 unsigned long end;
43} meminfo_t; 44} meminfo_t;
44 45
45#define SYSMEM_BANKS_MAX 5 46#define SYSMEM_BANKS_MAX 5
@@ -48,14 +49,11 @@ typedef struct meminfo {
48#define MEMORY_TYPE_NONE 0x2000 49#define MEMORY_TYPE_NONE 0x2000
49 50
50typedef struct sysmem_info { 51typedef struct sysmem_info {
51 int nr_banks; 52 int nr_banks;
52 meminfo_t bank[SYSMEM_BANKS_MAX]; 53 meminfo_t bank[SYSMEM_BANKS_MAX];
53} sysmem_info_t; 54} sysmem_info_t;
54 55
55extern sysmem_info_t sysmem; 56extern sysmem_info_t sysmem;
56 57
57#endif 58#endif
58#endif 59#endif
59
60
61
diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
index 2c20a58f94cd..60e18773ecb8 100644
--- a/arch/xtensa/include/asm/cacheasm.h
+++ b/arch/xtensa/include/asm/cacheasm.h
@@ -174,4 +174,3 @@
174 __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 174 __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
175 175
176 .endm 176 .endm
177
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 569fec4f9a20..127cd48883c4 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -104,7 +104,8 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
104#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 104#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
105extern void flush_dcache_page(struct page*); 105extern void flush_dcache_page(struct page*);
106extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); 106extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
107extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long); 107extern void flush_cache_page(struct vm_area_struct*,
108 unsigned long, unsigned long);
108 109
109#else 110#else
110 111
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
index e4d831a30772..aed7ad68ca46 100644
--- a/arch/xtensa/include/asm/checksum.h
+++ b/arch/xtensa/include/asm/checksum.h
@@ -36,8 +36,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
36 * better 64-bit) boundary 36 * better 64-bit) boundary
37 */ 37 */
38 38
39asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum, 39asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
40 int *src_err_ptr, int *dst_err_ptr); 40 int len, __wsum sum,
41 int *src_err_ptr, int *dst_err_ptr);
41 42
42/* 43/*
43 * Note: when you get a NULL pointer exception here this means someone 44 * Note: when you get a NULL pointer exception here this means someone
@@ -54,7 +55,7 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst,
54 55
55static inline 56static inline
56__wsum csum_partial_copy_from_user(const void __user *src, void *dst, 57__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
57 int len, __wsum sum, int *err_ptr) 58 int len, __wsum sum, int *err_ptr)
58{ 59{
59 return csum_partial_copy_generic((__force const void *)src, dst, 60 return csum_partial_copy_generic((__force const void *)src, dst,
60 len, sum, err_ptr, NULL); 61 len, sum, err_ptr, NULL);
@@ -112,7 +113,8 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
112 /* Since the input registers which are loaded with iph and ihl 113 /* Since the input registers which are loaded with iph and ihl
113 are modified, we must also specify them as outputs, or gcc 114 are modified, we must also specify them as outputs, or gcc
114 will assume they contain their original values. */ 115 will assume they contain their original values. */
115 : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr) 116 : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
117 "=&r" (endaddr)
116 : "1" (iph), "2" (ihl) 118 : "1" (iph), "2" (ihl)
117 : "memory"); 119 : "memory");
118 120
@@ -168,7 +170,7 @@ static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
168 170
169static __inline__ __sum16 ip_compute_csum(const void *buff, int len) 171static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
170{ 172{
171 return csum_fold (csum_partial(buff, len, 0)); 173 return csum_fold (csum_partial(buff, len, 0));
172} 174}
173 175
174#define _HAVE_ARCH_IPV6_CSUM 176#define _HAVE_ARCH_IPV6_CSUM
@@ -238,11 +240,12 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
238 * Copy and checksum to user 240 * Copy and checksum to user
239 */ 241 */
240#define HAVE_CSUM_COPY_USER 242#define HAVE_CSUM_COPY_USER
241static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst, 243static __inline__ __wsum csum_and_copy_to_user(const void *src,
242 int len, __wsum sum, int *err_ptr) 244 void __user *dst, int len,
245 __wsum sum, int *err_ptr)
243{ 246{
244 if (access_ok(VERIFY_WRITE, dst, len)) 247 if (access_ok(VERIFY_WRITE, dst, len))
245 return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr); 248 return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr);
246 249
247 if (len) 250 if (len)
248 *err_ptr = -EFAULT; 251 *err_ptr = -EFAULT;
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 64dad04a9d27..d9ab131bc1aa 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -22,17 +22,30 @@
22static inline unsigned long 22static inline unsigned long
23__cmpxchg_u32(volatile int *p, int old, int new) 23__cmpxchg_u32(volatile int *p, int old, int new)
24{ 24{
25 __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" 25#if XCHAL_HAVE_S32C1I
26 "l32i %0, %1, 0 \n\t" 26 __asm__ __volatile__(
27 "bne %0, %2, 1f \n\t" 27 " wsr %2, scompare1\n"
28 "s32i %3, %1, 0 \n\t" 28 " s32c1i %0, %1, 0\n"
29 "1: \n\t" 29 : "+a" (new)
30 "wsr a15, ps \n\t" 30 : "a" (p), "a" (old)
31 "rsync \n\t" 31 : "memory"
32 : "=&a" (old) 32 );
33 : "a" (p), "a" (old), "r" (new) 33
34 : "a15", "memory"); 34 return new;
35 return old; 35#else
36 __asm__ __volatile__(
37 " rsil a15, "__stringify(LOCKLEVEL)"\n"
38 " l32i %0, %1, 0\n"
39 " bne %0, %2, 1f\n"
40 " s32i %3, %1, 0\n"
41 "1:\n"
42 " wsr a15, ps\n"
43 " rsync\n"
44 : "=&a" (old)
45 : "a" (p), "a" (old), "r" (new)
46 : "a15", "memory");
47 return old;
48#endif
36} 49}
37/* This function doesn't exist, so you'll get a linker error 50/* This function doesn't exist, so you'll get a linker error
38 * if something tries to do an invalid cmpxchg(). */ 51 * if something tries to do an invalid cmpxchg(). */
@@ -93,19 +106,36 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
93 106
94static inline unsigned long xchg_u32(volatile int * m, unsigned long val) 107static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
95{ 108{
96 unsigned long tmp; 109#if XCHAL_HAVE_S32C1I
97 __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" 110 unsigned long tmp, result;
98 "l32i %0, %1, 0 \n\t" 111 __asm__ __volatile__(
99 "s32i %2, %1, 0 \n\t" 112 "1: l32i %1, %2, 0\n"
100 "wsr a15, ps \n\t" 113 " mov %0, %3\n"
101 "rsync \n\t" 114 " wsr %1, scompare1\n"
102 : "=&a" (tmp) 115 " s32c1i %0, %2, 0\n"
103 : "a" (m), "a" (val) 116 " bne %0, %1, 1b\n"
104 : "a15", "memory"); 117 : "=&a" (result), "=&a" (tmp)
105 return tmp; 118 : "a" (m), "a" (val)
119 : "memory"
120 );
121 return result;
122#else
123 unsigned long tmp;
124 __asm__ __volatile__(
125 " rsil a15, "__stringify(LOCKLEVEL)"\n"
126 " l32i %0, %1, 0\n"
127 " s32i %2, %1, 0\n"
128 " wsr a15, ps\n"
129 " rsync\n"
130 : "=&a" (tmp)
131 : "a" (m), "a" (val)
132 : "a15", "memory");
133 return tmp;
134#endif
106} 135}
107 136
108#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 137#define xchg(ptr,x) \
138 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
109 139
110/* 140/*
111 * This only works if the compiler isn't horribly bad at optimizing. 141 * This only works if the compiler isn't horribly bad at optimizing.
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
index 8d1eb5d78649..47e46dcf5d49 100644
--- a/arch/xtensa/include/asm/current.h
+++ b/arch/xtensa/include/asm/current.h
@@ -30,7 +30,7 @@ static inline struct task_struct *get_current(void)
30 30
31#define GET_CURRENT(reg,sp) \ 31#define GET_CURRENT(reg,sp) \
32 GET_THREAD_INFO(reg,sp); \ 32 GET_THREAD_INFO(reg,sp); \
33 l32i reg, reg, TI_TASK \ 33 l32i reg, reg, TI_TASK \
34 34
35#endif 35#endif
36 36
diff --git a/arch/xtensa/include/asm/delay.h b/arch/xtensa/include/asm/delay.h
index 58c0a4fd4003..61fc5faeb46c 100644
--- a/arch/xtensa/include/asm/delay.h
+++ b/arch/xtensa/include/asm/delay.h
@@ -19,9 +19,9 @@ extern unsigned long loops_per_jiffy;
19 19
20static inline void __delay(unsigned long loops) 20static inline void __delay(unsigned long loops)
21{ 21{
22 /* 2 cycles per loop. */ 22 /* 2 cycles per loop. */
23 __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b" 23 __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
24 : "=r" (loops) : "0" (loops)); 24 : "=r" (loops) : "0" (loops));
25} 25}
26 26
27static __inline__ u32 xtensa_get_ccount(void) 27static __inline__ u32 xtensa_get_ccount(void)
@@ -46,4 +46,3 @@ static __inline__ void udelay (unsigned long usecs)
46} 46}
47 47
48#endif 48#endif
49
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 492c95790ad5..4acb5feba1fb 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -16,6 +16,8 @@
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <linux/scatterlist.h> 17#include <linux/scatterlist.h>
18 18
19#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
20
19/* 21/*
20 * DMA-consistent mapping functions. 22 * DMA-consistent mapping functions.
21 */ 23 */
@@ -98,8 +100,8 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
98} 100}
99 101
100static inline void 102static inline void
101dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, 103dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
102 enum dma_data_direction direction) 104 size_t size, enum dma_data_direction direction)
103{ 105{
104 consistent_sync((void *)bus_to_virt(dma_handle), size, direction); 106 consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
105} 107}
diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h
index 5293312bc6a4..264d5fa450d8 100644
--- a/arch/xtensa/include/asm/elf.h
+++ b/arch/xtensa/include/asm/elf.h
@@ -168,11 +168,11 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
168 */ 168 */
169 169
170#define ELF_PLAT_INIT(_r, load_addr) \ 170#define ELF_PLAT_INIT(_r, load_addr) \
171 do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \ 171 do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \
172 _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \ 172 _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \
173 _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \ 173 _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \
174 _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \ 174 _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
175 } while (0) 175 } while (0)
176 176
177typedef struct { 177typedef struct {
178 xtregs_opt_t opt; 178 xtregs_opt_t opt;
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 0a046ca5a687..80be15124697 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -14,4 +14,3 @@
14extern void flush_cache_kmaps(void); 14extern void flush_cache_kmaps(void);
15 15
16#endif 16#endif
17
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
new file mode 100644
index 000000000000..e1f8ba4061ed
--- /dev/null
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -0,0 +1,55 @@
1/*
2 * arch/xtensa/include/asm/initialize_mmu.h
3 *
4 * Initializes MMU:
5 *
6 * For the new V3 MMU we remap the TLB from virtual == physical
7 * to the standard Linux mapping used in earlier MMU's.
8 *
9 * The the MMU we also support a new configuration register that
10 * specifies how the S32C1I instruction operates with the cache
11 * controller.
12 *
13 * This file is subject to the terms and conditions of the GNU General
14 * Public License. See the file "COPYING" in the main directory of
15 * this archive for more details.
16 *
17 * Copyright (C) 2008 - 2012 Tensilica, Inc.
18 *
19 * Marc Gauthier <marc@tensilica.com>
20 * Pete Delaney <piet@tensilica.com>
21 */
22
23#ifndef _XTENSA_INITIALIZE_MMU_H
24#define _XTENSA_INITIALIZE_MMU_H
25
26#ifdef __ASSEMBLY__
27
28#define XTENSA_HWVERSION_RC_2009_0 230000
29
30 .macro initialize_mmu
31
32#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
33/*
34 * We Have Atomic Operation Control (ATOMCTL) Register; Initialize it.
35 * For details see Documentation/xtensa/atomctl.txt
36 */
37#if XCHAL_DCACHE_IS_COHERENT
38 movi a3, 0x25 /* For SMP/MX -- internal for writeback,
39 * RCW otherwise
40 */
41#else
42 movi a3, 0x29 /* non-MX -- Most cores use Std Memory
43 * Controlers which usually can't use RCW
44 */
45#endif
46 wsr a3, atomctl
47#endif /* XCHAL_HAVE_S32C1I &&
48 * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
49 */
50
51 .endm
52
53#endif /*__ASSEMBLY__*/
54
55#endif /* _XTENSA_INITIALIZE_MMU_H */
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index feb10af96519..d43525a286bb 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -107,7 +107,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
107 107
108 108
109static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 109static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
110 struct task_struct *tsk) 110 struct task_struct *tsk)
111{ 111{
112 unsigned long asid = asid_cache; 112 unsigned long asid = asid_cache;
113 113
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h
index 599e7a2e729d..3407cf7989b7 100644
--- a/arch/xtensa/include/asm/nommu_context.h
+++ b/arch/xtensa/include/asm/nommu_context.h
@@ -2,7 +2,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
2{ 2{
3} 3}
4 4
5static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 5static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm)
6{ 6{
7 return 0; 7 return 0;
8} 8}
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 7a5591a71f85..47f582333f6b 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -29,19 +29,19 @@
29 * PAGE_SHIFT determines the page size 29 * PAGE_SHIFT determines the page size
30 */ 30 */
31 31
32#define PAGE_SHIFT 12 32#define PAGE_SHIFT 12
33#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT) 33#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
34#define PAGE_MASK (~(PAGE_SIZE-1)) 34#define PAGE_MASK (~(PAGE_SIZE-1))
35 35
36#ifdef CONFIG_MMU 36#ifdef CONFIG_MMU
37#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR 37#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
38#define MAX_MEM_PFN XCHAL_KSEG_SIZE 38#define MAX_MEM_PFN XCHAL_KSEG_SIZE
39#else 39#else
40#define PAGE_OFFSET 0 40#define PAGE_OFFSET 0
41#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE) 41#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
42#endif 42#endif
43 43
44#define PGTABLE_START 0x80000000 44#define PGTABLE_START 0x80000000
45 45
46/* 46/*
47 * Cache aliasing: 47 * Cache aliasing:
@@ -161,7 +161,9 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*);
161 161
162#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) 162#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
163#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) 163#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
164#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr) 164#define pfn_valid(pfn) \
165 ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
166
165#ifdef CONFIG_DISCONTIGMEM 167#ifdef CONFIG_DISCONTIGMEM
166# error CONFIG_DISCONTIGMEM not supported 168# error CONFIG_DISCONTIGMEM not supported
167#endif 169#endif
diff --git a/arch/xtensa/include/asm/pci-bridge.h b/arch/xtensa/include/asm/pci-bridge.h
index 00fcbd7c534a..0b68c76ec1e6 100644
--- a/arch/xtensa/include/asm/pci-bridge.h
+++ b/arch/xtensa/include/asm/pci-bridge.h
@@ -35,7 +35,7 @@ struct pci_space {
35struct pci_controller { 35struct pci_controller {
36 int index; /* used for pci_controller_num */ 36 int index; /* used for pci_controller_num */
37 struct pci_controller *next; 37 struct pci_controller *next;
38 struct pci_bus *bus; 38 struct pci_bus *bus;
39 void *arch_data; 39 void *arch_data;
40 40
41 int first_busno; 41 int first_busno;
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
index 05244f07dd31..614be031a79a 100644
--- a/arch/xtensa/include/asm/pci.h
+++ b/arch/xtensa/include/asm/pci.h
@@ -53,7 +53,7 @@ struct pci_dev;
53 53
54/* Map a range of PCI memory or I/O space for a device into user space */ 54/* Map a range of PCI memory or I/O space for a device into user space */
55int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, 55int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
56 enum pci_mmap_state mmap_state, int write_combine); 56 enum pci_mmap_state mmap_state, int write_combine);
57 57
58/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ 58/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
59#define HAVE_PCI_MMAP 1 59#define HAVE_PCI_MMAP 1
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index 40cf9bceda2c..cf914c8c249a 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -42,7 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
42 42
43extern struct kmem_cache *pgtable_cache; 43extern struct kmem_cache *pgtable_cache;
44 44
45static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 45static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
46 unsigned long address) 46 unsigned long address)
47{ 47{
48 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT); 48 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index b03c043ce75b..c90ea5bfa1b4 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -284,7 +284,7 @@ struct vm_area_struct;
284 284
285static inline int 285static inline int
286ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, 286ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
287 pte_t *ptep) 287 pte_t *ptep)
288{ 288{
289 pte_t pte = *ptep; 289 pte_t pte = *ptep;
290 if (!pte_young(pte)) 290 if (!pte_young(pte))
@@ -304,8 +304,8 @@ ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
304static inline void 304static inline void
305ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 305ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
306{ 306{
307 pte_t pte = *ptep; 307 pte_t pte = *ptep;
308 update_pte(ptep, pte_wrprotect(pte)); 308 update_pte(ptep, pte_wrprotect(pte));
309} 309}
310 310
311/* to find an entry in a kernel page-table-directory */ 311/* to find an entry in a kernel page-table-directory */
@@ -399,7 +399,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
399 */ 399 */
400 400
401#define io_remap_pfn_range(vma,from,pfn,size,prot) \ 401#define io_remap_pfn_range(vma,from,pfn,size,prot) \
402 remap_pfn_range(vma, from, pfn, size, prot) 402 remap_pfn_range(vma, from, pfn, size, prot)
403 403
404typedef pte_t *pte_addr_t; 404typedef pte_t *pte_addr_t;
405 405
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
index 7d936e58e9be..ec098b68fb9a 100644
--- a/arch/xtensa/include/asm/platform.h
+++ b/arch/xtensa/include/asm/platform.h
@@ -75,4 +75,3 @@ extern int platform_pcibios_fixup (void);
75extern void platform_calibrate_ccount (void); 75extern void platform_calibrate_ccount (void);
76 76
77#endif /* _XTENSA_PLATFORM_H */ 77#endif /* _XTENSA_PLATFORM_H */
78
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 2d630e7399ca..e5fb6b0abdf4 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -89,7 +89,7 @@
89#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000)) 89#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
90 90
91typedef struct { 91typedef struct {
92 unsigned long seg; 92 unsigned long seg;
93} mm_segment_t; 93} mm_segment_t;
94 94
95struct thread_struct { 95struct thread_struct {
@@ -145,10 +145,10 @@ struct thread_struct {
145 * set_thread_state in signal.c depends on it. 145 * set_thread_state in signal.c depends on it.
146 */ 146 */
147#define USER_PS_VALUE ((1 << PS_WOE_BIT) | \ 147#define USER_PS_VALUE ((1 << PS_WOE_BIT) | \
148 (1 << PS_CALLINC_SHIFT) | \ 148 (1 << PS_CALLINC_SHIFT) | \
149 (USER_RING << PS_RING_SHIFT) | \ 149 (USER_RING << PS_RING_SHIFT) | \
150 (1 << PS_UM_BIT) | \ 150 (1 << PS_UM_BIT) | \
151 (1 << PS_EXCM_BIT)) 151 (1 << PS_EXCM_BIT))
152 152
153/* Clearing a0 terminates the backtrace. */ 153/* Clearing a0 terminates the backtrace. */
154#define start_thread(regs, new_pc, new_sp) \ 154#define start_thread(regs, new_pc, new_sp) \
diff --git a/arch/xtensa/include/asm/prom.h b/arch/xtensa/include/asm/prom.h
new file mode 100644
index 000000000000..f3d7cd2c0de7
--- /dev/null
+++ b/arch/xtensa/include/asm/prom.h
@@ -0,0 +1,6 @@
1#ifndef _XTENSA_ASM_PROM_H
2#define _XTENSA_ASM_PROM_H
3
4#define HAVE_ARCH_DEVTREE_FIXUPS
5
6#endif /* _XTENSA_ASM_PROM_H */
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index da21c17f23aa..58bf6fd3f913 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -37,7 +37,7 @@ struct pt_regs {
37 unsigned long windowstart; /* 52 */ 37 unsigned long windowstart; /* 52 */
38 unsigned long syscall; /* 56 */ 38 unsigned long syscall; /* 56 */
39 unsigned long icountlevel; /* 60 */ 39 unsigned long icountlevel; /* 60 */
40 int reserved[1]; /* 64 */ 40 unsigned long scompare1; /* 64 */
41 41
42 /* Additional configurable registers that are used by the compiler. */ 42 /* Additional configurable registers that are used by the compiler. */
43 xtregs_opt_t xtregs_opt; 43 xtregs_opt_t xtregs_opt;
@@ -55,7 +55,7 @@ struct pt_regs {
55 55
56# define arch_has_single_step() (1) 56# define arch_has_single_step() (1)
57# define task_pt_regs(tsk) ((struct pt_regs*) \ 57# define task_pt_regs(tsk) ((struct pt_regs*) \
58 (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1) 58 (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
59# define user_mode(regs) (((regs)->ps & 0x00000020)!=0) 59# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
60# define instruction_pointer(regs) ((regs)->pc) 60# define instruction_pointer(regs) ((regs)->pc)
61 61
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
index 8a8aa61ccc8d..76096a4e5b8d 100644
--- a/arch/xtensa/include/asm/regs.h
+++ b/arch/xtensa/include/asm/regs.h
@@ -52,6 +52,10 @@
52#define EXCCAUSE_SPECULATION 7 52#define EXCCAUSE_SPECULATION 7
53#define EXCCAUSE_PRIVILEGED 8 53#define EXCCAUSE_PRIVILEGED 8
54#define EXCCAUSE_UNALIGNED 9 54#define EXCCAUSE_UNALIGNED 9
55#define EXCCAUSE_INSTR_DATA_ERROR 12
56#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13
57#define EXCCAUSE_INSTR_ADDR_ERROR 14
58#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15
55#define EXCCAUSE_ITLB_MISS 16 59#define EXCCAUSE_ITLB_MISS 16
56#define EXCCAUSE_ITLB_MULTIHIT 17 60#define EXCCAUSE_ITLB_MULTIHIT 17
57#define EXCCAUSE_ITLB_PRIVILEGE 18 61#define EXCCAUSE_ITLB_PRIVILEGE 18
@@ -105,4 +109,3 @@
105#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */ 109#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
106 110
107#endif /* _XTENSA_SPECREG_H */ 111#endif /* _XTENSA_SPECREG_H */
108
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
index 8ff23649581b..03975906b36f 100644
--- a/arch/xtensa/include/asm/spinlock.h
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -11,6 +11,192 @@
11#ifndef _XTENSA_SPINLOCK_H 11#ifndef _XTENSA_SPINLOCK_H
12#define _XTENSA_SPINLOCK_H 12#define _XTENSA_SPINLOCK_H
13 13
14#include <linux/spinlock.h> 14/*
15 * spinlock
16 *
17 * There is at most one owner of a spinlock. There are not different
18 * types of spinlock owners like there are for rwlocks (see below).
19 *
20 * When trying to obtain a spinlock, the function "spins" forever, or busy-
21 * waits, until the lock is obtained. When spinning, presumably some other
22 * owner will soon give up the spinlock making it available to others. Use
23 * the trylock functions to avoid spinning forever.
24 *
25 * possible values:
26 *
27 * 0 nobody owns the spinlock
28 * 1 somebody owns the spinlock
29 */
30
31#define __raw_spin_is_locked(x) ((x)->slock != 0)
32#define __raw_spin_unlock_wait(lock) \
33 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
34
35#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
36
37static inline void __raw_spin_lock(raw_spinlock_t *lock)
38{
39 unsigned long tmp;
40
41 __asm__ __volatile__(
42 " movi %0, 0\n"
43 " wsr %0, scompare1\n"
44 "1: movi %0, 1\n"
45 " s32c1i %0, %1, 0\n"
46 " bnez %0, 1b\n"
47 : "=&a" (tmp)
48 : "a" (&lock->slock)
49 : "memory");
50}
51
52/* Returns 1 if the lock is obtained, 0 otherwise. */
53
54static inline int __raw_spin_trylock(raw_spinlock_t *lock)
55{
56 unsigned long tmp;
57
58 __asm__ __volatile__(
59 " movi %0, 0\n"
60 " wsr %0, scompare1\n"
61 " movi %0, 1\n"
62 " s32c1i %0, %1, 0\n"
63 : "=&a" (tmp)
64 : "a" (&lock->slock)
65 : "memory");
66
67 return tmp == 0 ? 1 : 0;
68}
69
70static inline void __raw_spin_unlock(raw_spinlock_t *lock)
71{
72 unsigned long tmp;
73
74 __asm__ __volatile__(
75 " movi %0, 0\n"
76 " s32ri %0, %1, 0\n"
77 : "=&a" (tmp)
78 : "a" (&lock->slock)
79 : "memory");
80}
81
82/*
83 * rwlock
84 *
85 * Read-write locks are really a more flexible spinlock. They allow
86 * multiple readers but only one writer. Write ownership is exclusive
87 * (i.e., all other readers and writers are blocked from ownership while
88 * there is a write owner). These rwlocks are unfair to writers. Writers
89 * can be starved for an indefinite time by readers.
90 *
91 * possible values:
92 *
93 * 0 nobody owns the rwlock
94 * >0 one or more readers own the rwlock
95 * (the positive value is the actual number of readers)
96 * 0x80000000 one writer owns the rwlock, no other writers, no readers
97 */
98
99#define __raw_write_can_lock(x) ((x)->lock == 0)
100
101static inline void __raw_write_lock(raw_rwlock_t *rw)
102{
103 unsigned long tmp;
104
105 __asm__ __volatile__(
106 " movi %0, 0\n"
107 " wsr %0, scompare1\n"
108 "1: movi %0, 1\n"
109 " slli %0, %0, 31\n"
110 " s32c1i %0, %1, 0\n"
111 " bnez %0, 1b\n"
112 : "=&a" (tmp)
113 : "a" (&rw->lock)
114 : "memory");
115}
116
117/* Returns 1 if the lock is obtained, 0 otherwise. */
118
119static inline int __raw_write_trylock(raw_rwlock_t *rw)
120{
121 unsigned long tmp;
122
123 __asm__ __volatile__(
124 " movi %0, 0\n"
125 " wsr %0, scompare1\n"
126 " movi %0, 1\n"
127 " slli %0, %0, 31\n"
128 " s32c1i %0, %1, 0\n"
129 : "=&a" (tmp)
130 : "a" (&rw->lock)
131 : "memory");
132
133 return tmp == 0 ? 1 : 0;
134}
135
136static inline void __raw_write_unlock(raw_rwlock_t *rw)
137{
138 unsigned long tmp;
139
140 __asm__ __volatile__(
141 " movi %0, 0\n"
142 " s32ri %0, %1, 0\n"
143 : "=&a" (tmp)
144 : "a" (&rw->lock)
145 : "memory");
146}
147
148static inline void __raw_read_lock(raw_rwlock_t *rw)
149{
150 unsigned long tmp;
151 unsigned long result;
152
153 __asm__ __volatile__(
154 "1: l32i %1, %2, 0\n"
155 " bltz %1, 1b\n"
156 " wsr %1, scompare1\n"
157 " addi %0, %1, 1\n"
158 " s32c1i %0, %2, 0\n"
159 " bne %0, %1, 1b\n"
160 : "=&a" (result), "=&a" (tmp)
161 : "a" (&rw->lock)
162 : "memory");
163}
164
165/* Returns 1 if the lock is obtained, 0 otherwise. */
166
167static inline int __raw_read_trylock(raw_rwlock_t *rw)
168{
169 unsigned long result;
170 unsigned long tmp;
171
172 __asm__ __volatile__(
173 " l32i %1, %2, 0\n"
174 " addi %0, %1, 1\n"
175 " bltz %0, 1f\n"
176 " wsr %1, scompare1\n"
177 " s32c1i %0, %2, 0\n"
178 " sub %0, %0, %1\n"
179 "1:\n"
180 : "=&a" (result), "=&a" (tmp)
181 : "a" (&rw->lock)
182 : "memory");
183
184 return result == 0;
185}
186
187static inline void __raw_read_unlock(raw_rwlock_t *rw)
188{
189 unsigned long tmp1, tmp2;
190
191 __asm__ __volatile__(
192 "1: l32i %1, %2, 0\n"
193 " addi %0, %1, -1\n"
194 " wsr %1, scompare1\n"
195 " s32c1i %0, %2, 0\n"
196 " bne %0, %1, 1b\n"
197 : "=&a" (tmp1), "=&a" (tmp2)
198 : "a" (&rw->lock)
199 : "memory");
200}
15 201
16#endif /* _XTENSA_SPINLOCK_H */ 202#endif /* _XTENSA_SPINLOCK_H */
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index b00c928d4cce..8d5e47fad095 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -25,9 +25,10 @@ asmlinkage long xtensa_fadvise64_64(int, int,
25/* Should probably move to linux/syscalls.h */ 25/* Should probably move to linux/syscalls.h */
26struct pollfd; 26struct pollfd;
27asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp, 27asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
28 fd_set __user *exp, struct timespec __user *tsp, void __user *sig); 28 fd_set __user *exp, struct timespec __user *tsp,
29 void __user *sig);
29asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, 30asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
30 struct timespec __user *tsp, const sigset_t __user *sigmask, 31 struct timespec __user *tsp,
31 size_t sigsetsize); 32 const sigset_t __user *sigmask,
32asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, 33 size_t sigsetsize);
33 size_t sigsetsize); 34asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
new file mode 100644
index 000000000000..54f70440185e
--- /dev/null
+++ b/arch/xtensa/include/asm/traps.h
@@ -0,0 +1,23 @@
1/*
2 * arch/xtensa/include/asm/traps.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2012 Tensilica Inc.
9 */
10#ifndef _XTENSA_TRAPS_H
11#define _XTENSA_TRAPS_H
12
13#include <asm/ptrace.h>
14
15/*
16 * handler must be either of the following:
17 * void (*)(struct pt_regs *regs);
18 * void (*)(struct pt_regs *regs, unsigned long exccause);
19 */
20extern void * __init trap_set_handler(int cause, void *handler);
21extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
22
23#endif /* _XTENSA_TRAPS_H */
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 6e4bb3b791ab..fd686dc45d1a 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -180,7 +180,8 @@
180#define segment_eq(a,b) ((a).seg == (b).seg) 180#define segment_eq(a,b) ((a).seg == (b).seg)
181 181
182#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) 182#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
183#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) 183#define __user_ok(addr,size) \
184 (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
184#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size))) 185#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
185#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) 186#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
186 187
@@ -234,10 +235,10 @@ do { \
234 int __cb; \ 235 int __cb; \
235 retval = 0; \ 236 retval = 0; \
236 switch (size) { \ 237 switch (size) { \
237 case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \ 238 case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \
238 case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \ 239 case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \
239 case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \ 240 case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \
240 case 8: { \ 241 case 8: { \
241 __typeof__(*ptr) __v64 = x; \ 242 __typeof__(*ptr) __v64 = x; \
242 retval = __copy_to_user(ptr,&__v64,8); \ 243 retval = __copy_to_user(ptr,&__v64,8); \
243 break; \ 244 break; \
@@ -291,7 +292,7 @@ do { \
291 * __check_align_* macros still work. 292 * __check_align_* macros still work.
292 */ 293 */
293#define __put_user_asm(x, addr, err, align, insn, cb) \ 294#define __put_user_asm(x, addr, err, align, insn, cb) \
294 __asm__ __volatile__( \ 295__asm__ __volatile__( \
295 __check_align_##align \ 296 __check_align_##align \
296 "1: "insn" %2, %3, 0 \n" \ 297 "1: "insn" %2, %3, 0 \n" \
297 "2: \n" \ 298 "2: \n" \
@@ -301,8 +302,8 @@ do { \
301 " .long 2b \n" \ 302 " .long 2b \n" \
302 "5: \n" \ 303 "5: \n" \
303 " l32r %1, 4b \n" \ 304 " l32r %1, 4b \n" \
304 " movi %0, %4 \n" \ 305 " movi %0, %4 \n" \
305 " jx %1 \n" \ 306 " jx %1 \n" \
306 " .previous \n" \ 307 " .previous \n" \
307 " .section __ex_table,\"a\" \n" \ 308 " .section __ex_table,\"a\" \n" \
308 " .long 1b, 5b \n" \ 309 " .long 1b, 5b \n" \
@@ -334,13 +335,13 @@ extern long __get_user_bad(void);
334do { \ 335do { \
335 int __cb; \ 336 int __cb; \
336 retval = 0; \ 337 retval = 0; \
337 switch (size) { \ 338 switch (size) { \
338 case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \ 339 case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \
339 case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \ 340 case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \
340 case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \ 341 case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \
341 case 8: retval = __copy_from_user(&x,ptr,8); break; \ 342 case 8: retval = __copy_from_user(&x,ptr,8); break; \
342 default: (x) = __get_user_bad(); \ 343 default: (x) = __get_user_bad(); \
343 } \ 344 } \
344} while (0) 345} while (0)
345 346
346 347
@@ -349,7 +350,7 @@ do { \
349 * __check_align_* macros still work. 350 * __check_align_* macros still work.
350 */ 351 */
351#define __get_user_asm(x, addr, err, align, insn, cb) \ 352#define __get_user_asm(x, addr, err, align, insn, cb) \
352 __asm__ __volatile__( \ 353__asm__ __volatile__( \
353 __check_align_##align \ 354 __check_align_##align \
354 "1: "insn" %2, %3, 0 \n" \ 355 "1: "insn" %2, %3, 0 \n" \
355 "2: \n" \ 356 "2: \n" \
@@ -360,8 +361,8 @@ do { \
360 "5: \n" \ 361 "5: \n" \
361 " l32r %1, 4b \n" \ 362 " l32r %1, 4b \n" \
362 " movi %2, 0 \n" \ 363 " movi %2, 0 \n" \
363 " movi %0, %4 \n" \ 364 " movi %0, %4 \n" \
364 " jx %1 \n" \ 365 " jx %1 \n" \
365 " .previous \n" \ 366 " .previous \n" \
366 " .section __ex_table,\"a\" \n" \ 367 " .section __ex_table,\"a\" \n" \
367 " .long 1b, 5b \n" \ 368 " .long 1b, 5b \n" \
@@ -421,8 +422,10 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
421 422
422#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n)) 423#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
423#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n)) 424#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
424#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n)) 425#define __copy_to_user(to,from,n) \
425#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n)) 426 __generic_copy_to_user_nocheck((to),(from),(n))
427#define __copy_from_user(to,from,n) \
428 __generic_copy_from_user_nocheck((to),(from),(n))
426#define __copy_to_user_inatomic __copy_to_user 429#define __copy_to_user_inatomic __copy_to_user
427#define __copy_from_user_inatomic __copy_from_user 430#define __copy_from_user_inatomic __copy_from_user
428 431
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index f36cef5a62ff..c3a59d992ac0 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -23,13 +23,13 @@ obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
23# 23#
24# Replicate rules in scripts/Makefile.build 24# Replicate rules in scripts/Makefile.build
25 25
26sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \ 26sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \
27 -e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g' \ 27 -e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g' \
28 -e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g' 28 -e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g'
29 29
30quiet_cmd__cpp_lds_S = LDS $@ 30quiet_cmd__cpp_lds_S = LDS $@
31 cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \ 31cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
32 | sed $(sed-y) >$@ 32 | sed $(sed-y) >$@
33 33
34$(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE 34$(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE
35 $(call if_changed_dep,_cpp_lds_S) 35 $(call if_changed_dep,_cpp_lds_S)
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index 934ae58e2c79..aa2e87b8566a 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -442,7 +442,7 @@ ENTRY(fast_unaligned)
442 mov a1, a2 442 mov a1, a2
443 443
444 rsr a0, ps 444 rsr a0, ps
445 bbsi.l a2, PS_UM_BIT, 1f # jump if user mode 445 bbsi.l a2, PS_UM_BIT, 1f # jump if user mode
446 446
447 movi a0, _kernel_exception 447 movi a0, _kernel_exception
448 jx a0 448 jx a0
@@ -450,6 +450,6 @@ ENTRY(fast_unaligned)
4501: movi a0, _user_exception 4501: movi a0, _user_exception
451 jx a0 451 jx a0
452 452
453ENDPROC(fast_unaligned)
453 454
454#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */ 455#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
455
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 7dc3f9157185..0701fad170db 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -41,6 +41,7 @@ int main(void)
41 DEFINE(PT_SAR, offsetof (struct pt_regs, sar)); 41 DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
42 DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel)); 42 DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
43 DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall)); 43 DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
44 DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1));
44 DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0])); 45 DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
45 DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0])); 46 DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
46 DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1])); 47 DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
@@ -91,7 +92,8 @@ int main(void)
91#endif 92#endif
92 DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user)); 93 DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
93 DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t)); 94 DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
94 DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds)); 95 DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, \
96 thread.current_ds));
95 97
96 /* struct mm_struct */ 98 /* struct mm_struct */
97 DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users)); 99 DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
@@ -108,4 +110,3 @@ int main(void)
108 110
109 return 0; 111 return 0;
110} 112}
111
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 54c3be313bfa..647657484866 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -43,10 +43,13 @@
43/* IO protection is currently unsupported. */ 43/* IO protection is currently unsupported. */
44 44
45ENTRY(fast_io_protect) 45ENTRY(fast_io_protect)
46
46 wsr a0, excsave1 47 wsr a0, excsave1
47 movi a0, unrecoverable_exception 48 movi a0, unrecoverable_exception
48 callx0 a0 49 callx0 a0
49 50
51ENDPROC(fast_io_protect)
52
50#if XTENSA_HAVE_COPROCESSORS 53#if XTENSA_HAVE_COPROCESSORS
51 54
52/* 55/*
@@ -139,6 +142,7 @@ ENTRY(fast_io_protect)
139 */ 142 */
140 143
141ENTRY(coprocessor_save) 144ENTRY(coprocessor_save)
145
142 entry a1, 32 146 entry a1, 32
143 s32i a0, a1, 0 147 s32i a0, a1, 0
144 movi a0, .Lsave_cp_regs_jump_table 148 movi a0, .Lsave_cp_regs_jump_table
@@ -150,7 +154,10 @@ ENTRY(coprocessor_save)
1501: l32i a0, a1, 0 1541: l32i a0, a1, 0
151 retw 155 retw
152 156
157ENDPROC(coprocessor_save)
158
153ENTRY(coprocessor_load) 159ENTRY(coprocessor_load)
160
154 entry a1, 32 161 entry a1, 32
155 s32i a0, a1, 0 162 s32i a0, a1, 0
156 movi a0, .Lload_cp_regs_jump_table 163 movi a0, .Lload_cp_regs_jump_table
@@ -162,8 +169,10 @@ ENTRY(coprocessor_load)
1621: l32i a0, a1, 0 1691: l32i a0, a1, 0
163 retw 170 retw
164 171
172ENDPROC(coprocessor_load)
173
165/* 174/*
166 * coprocessor_flush(struct task_info*, index) 175 * coprocessor_flush(struct task_info*, index)
167 * a2 a3 176 * a2 a3
168 * coprocessor_restore(struct task_info*, index) 177 * coprocessor_restore(struct task_info*, index)
169 * a2 a3 178 * a2 a3
@@ -178,6 +187,7 @@ ENTRY(coprocessor_load)
178 187
179 188
180ENTRY(coprocessor_flush) 189ENTRY(coprocessor_flush)
190
181 entry a1, 32 191 entry a1, 32
182 s32i a0, a1, 0 192 s32i a0, a1, 0
183 movi a0, .Lsave_cp_regs_jump_table 193 movi a0, .Lsave_cp_regs_jump_table
@@ -191,6 +201,8 @@ ENTRY(coprocessor_flush)
1911: l32i a0, a1, 0 2011: l32i a0, a1, 0
192 retw 202 retw
193 203
204ENDPROC(coprocessor_flush)
205
194ENTRY(coprocessor_restore) 206ENTRY(coprocessor_restore)
195 entry a1, 32 207 entry a1, 32
196 s32i a0, a1, 0 208 s32i a0, a1, 0
@@ -205,6 +217,8 @@ ENTRY(coprocessor_restore)
2051: l32i a0, a1, 0 2171: l32i a0, a1, 0
206 retw 218 retw
207 219
220ENDPROC(coprocessor_restore)
221
208/* 222/*
209 * Entry condition: 223 * Entry condition:
210 * 224 *
@@ -220,10 +234,12 @@ ENTRY(coprocessor_restore)
220 */ 234 */
221 235
222ENTRY(fast_coprocessor_double) 236ENTRY(fast_coprocessor_double)
237
223 wsr a0, excsave1 238 wsr a0, excsave1
224 movi a0, unrecoverable_exception 239 movi a0, unrecoverable_exception
225 callx0 a0 240 callx0 a0
226 241
242ENDPROC(fast_coprocessor_double)
227 243
228ENTRY(fast_coprocessor) 244ENTRY(fast_coprocessor)
229 245
@@ -327,9 +343,14 @@ ENTRY(fast_coprocessor)
327 343
328 rfe 344 rfe
329 345
346ENDPROC(fast_coprocessor)
347
330 .data 348 .data
349
331ENTRY(coprocessor_owner) 350ENTRY(coprocessor_owner)
351
332 .fill XCHAL_CP_MAX, 4, 0 352 .fill XCHAL_CP_MAX, 4, 0
333 353
334#endif /* XTENSA_HAVE_COPROCESSORS */ 354END(coprocessor_owner)
335 355
356#endif /* XTENSA_HAVE_COPROCESSORS */
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 90bfc1dbc13d..3777fec85e7c 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -219,6 +219,7 @@ _user_exception:
219 219
220 j common_exception 220 j common_exception
221 221
222ENDPROC(user_exception)
222 223
223/* 224/*
224 * First-level exit handler for kernel exceptions 225 * First-level exit handler for kernel exceptions
@@ -371,6 +372,13 @@ common_exception:
371 s32i a2, a1, PT_LBEG 372 s32i a2, a1, PT_LBEG
372 s32i a3, a1, PT_LEND 373 s32i a3, a1, PT_LEND
373 374
375 /* Save SCOMPARE1 */
376
377#if XCHAL_HAVE_S32C1I
378 rsr a2, scompare1
379 s32i a2, a1, PT_SCOMPARE1
380#endif
381
374 /* Save optional registers. */ 382 /* Save optional registers. */
375 383
376 save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT 384 save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
@@ -432,6 +440,12 @@ common_exception_return:
432 440
433 load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT 441 load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
434 442
443 /* Restore SCOMPARE1 */
444
445#if XCHAL_HAVE_S32C1I
446 l32i a2, a1, PT_SCOMPARE1
447 wsr a2, scompare1
448#endif
435 wsr a3, ps /* disable interrupts */ 449 wsr a3, ps /* disable interrupts */
436 450
437 _bbci.l a3, PS_UM_BIT, kernel_exception_exit 451 _bbci.l a3, PS_UM_BIT, kernel_exception_exit
@@ -641,6 +655,8 @@ common_exception_exit:
641 l32i a1, a1, PT_AREG1 655 l32i a1, a1, PT_AREG1
642 rfde 656 rfde
643 657
658ENDPROC(kernel_exception)
659
644/* 660/*
645 * Debug exception handler. 661 * Debug exception handler.
646 * 662 *
@@ -701,6 +717,7 @@ ENTRY(debug_exception)
701 /* Debug exception while in exception mode. */ 717 /* Debug exception while in exception mode. */
7021: j 1b // FIXME!! 7181: j 1b // FIXME!!
703 719
720ENDPROC(debug_exception)
704 721
705/* 722/*
706 * We get here in case of an unrecoverable exception. 723 * We get here in case of an unrecoverable exception.
@@ -751,6 +768,7 @@ ENTRY(unrecoverable_exception)
751 768
7521: j 1b 7691: j 1b
753 770
771ENDPROC(unrecoverable_exception)
754 772
755/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */ 773/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
756 774
@@ -856,7 +874,7 @@ ENTRY(fast_alloca)
856 874
857 _bnei a0, 1, 1f # no 'movsp a1, ax': jump 875 _bnei a0, 1, 1f # no 'movsp a1, ax': jump
858 876
859 /* Move the save area. This implies the use of the L32E 877 /* Move the save area. This implies the use of the L32E
860 * and S32E instructions, because this move must be done with 878 * and S32E instructions, because this move must be done with
861 * the user's PS.RING privilege levels, not with ring 0 879 * the user's PS.RING privilege levels, not with ring 0
862 * (kernel's) privileges currently active with PS.EXCM 880 * (kernel's) privileges currently active with PS.EXCM
@@ -929,6 +947,7 @@ ENTRY(fast_alloca)
929 l32i a2, a2, PT_AREG2 947 l32i a2, a2, PT_AREG2
930 rfe 948 rfe
931 949
950ENDPROC(fast_alloca)
932 951
933/* 952/*
934 * fast system calls. 953 * fast system calls.
@@ -966,6 +985,8 @@ ENTRY(fast_syscall_kernel)
966 985
967 j kernel_exception 986 j kernel_exception
968 987
988ENDPROC(fast_syscall_kernel)
989
969ENTRY(fast_syscall_user) 990ENTRY(fast_syscall_user)
970 991
971 /* Skip syscall. */ 992 /* Skip syscall. */
@@ -983,19 +1004,21 @@ ENTRY(fast_syscall_user)
983 1004
984 j user_exception 1005 j user_exception
985 1006
986ENTRY(fast_syscall_unrecoverable) 1007ENDPROC(fast_syscall_user)
987 1008
988 /* Restore all states. */ 1009ENTRY(fast_syscall_unrecoverable)
989 1010
990 l32i a0, a2, PT_AREG0 # restore a0 1011 /* Restore all states. */
991 xsr a2, depc # restore a2, depc
992 rsr a3, excsave1
993 1012
994 wsr a0, excsave1 1013 l32i a0, a2, PT_AREG0 # restore a0
995 movi a0, unrecoverable_exception 1014 xsr a2, depc # restore a2, depc
996 callx0 a0 1015 rsr a3, excsave1
997 1016
1017 wsr a0, excsave1
1018 movi a0, unrecoverable_exception
1019 callx0 a0
998 1020
1021ENDPROC(fast_syscall_unrecoverable)
999 1022
1000/* 1023/*
1001 * sysxtensa syscall handler 1024 * sysxtensa syscall handler
@@ -1101,7 +1124,7 @@ CATCH
1101 movi a2, -EINVAL 1124 movi a2, -EINVAL
1102 rfe 1125 rfe
1103 1126
1104 1127ENDPROC(fast_syscall_xtensa)
1105 1128
1106 1129
1107/* fast_syscall_spill_registers. 1130/* fast_syscall_spill_registers.
@@ -1160,6 +1183,8 @@ ENTRY(fast_syscall_spill_registers)
1160 movi a2, 0 1183 movi a2, 0
1161 rfe 1184 rfe
1162 1185
1186ENDPROC(fast_syscall_spill_registers)
1187
1163/* Fixup handler. 1188/* Fixup handler.
1164 * 1189 *
1165 * We get here if the spill routine causes an exception, e.g. tlb miss. 1190 * We get here if the spill routine causes an exception, e.g. tlb miss.
@@ -1228,9 +1253,9 @@ fast_syscall_spill_registers_fixup:
1228 1253
1229 movi a3, exc_table 1254 movi a3, exc_table
1230 rsr a0, exccause 1255 rsr a0, exccause
1231 addx4 a0, a0, a3 # find entry in table 1256 addx4 a0, a0, a3 # find entry in table
1232 l32i a0, a0, EXC_TABLE_FAST_USER # load handler 1257 l32i a0, a0, EXC_TABLE_FAST_USER # load handler
1233 jx a0 1258 jx a0
1234 1259
1235fast_syscall_spill_registers_fixup_return: 1260fast_syscall_spill_registers_fixup_return:
1236 1261
@@ -1432,7 +1457,7 @@ ENTRY(_spill_registers)
1432 rsr a0, ps 1457 rsr a0, ps
1433 _bbci.l a0, PS_UM_BIT, 1f 1458 _bbci.l a0, PS_UM_BIT, 1f
1434 1459
1435 /* User space: Setup a dummy frame and kill application. 1460 /* User space: Setup a dummy frame and kill application.
1436 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. 1461 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
1437 */ 1462 */
1438 1463
@@ -1464,6 +1489,8 @@ ENTRY(_spill_registers)
1464 callx0 a0 # should not return 1489 callx0 a0 # should not return
14651: j 1b 14901: j 1b
1466 1491
1492ENDPROC(_spill_registers)
1493
1467#ifdef CONFIG_MMU 1494#ifdef CONFIG_MMU
1468/* 1495/*
1469 * We should never get here. Bail out! 1496 * We should never get here. Bail out!
@@ -1475,6 +1502,8 @@ ENTRY(fast_second_level_miss_double_kernel)
1475 callx0 a0 # should not return 1502 callx0 a0 # should not return
14761: j 1b 15031: j 1b
1477 1504
1505ENDPROC(fast_second_level_miss_double_kernel)
1506
1478/* First-level entry handler for user, kernel, and double 2nd-level 1507/* First-level entry handler for user, kernel, and double 2nd-level
1479 * TLB miss exceptions. Note that for now, user and kernel miss 1508 * TLB miss exceptions. Note that for now, user and kernel miss
1480 * exceptions share the same entry point and are handled identically. 1509 * exceptions share the same entry point and are handled identically.
@@ -1682,6 +1711,7 @@ ENTRY(fast_second_level_miss)
1682 j _kernel_exception 1711 j _kernel_exception
16831: j _user_exception 17121: j _user_exception
1684 1713
1714ENDPROC(fast_second_level_miss)
1685 1715
1686/* 1716/*
1687 * StoreProhibitedException 1717 * StoreProhibitedException
@@ -1777,6 +1807,9 @@ ENTRY(fast_store_prohibited)
1777 bbsi.l a2, PS_UM_BIT, 1f 1807 bbsi.l a2, PS_UM_BIT, 1f
1778 j _kernel_exception 1808 j _kernel_exception
17791: j _user_exception 18091: j _user_exception
1810
1811ENDPROC(fast_store_prohibited)
1812
1780#endif /* CONFIG_MMU */ 1813#endif /* CONFIG_MMU */
1781 1814
1782/* 1815/*
@@ -1787,6 +1820,7 @@ ENTRY(fast_store_prohibited)
1787 */ 1820 */
1788 1821
1789ENTRY(system_call) 1822ENTRY(system_call)
1823
1790 entry a1, 32 1824 entry a1, 32
1791 1825
1792 /* regs->syscall = regs->areg[2] */ 1826 /* regs->syscall = regs->areg[2] */
@@ -1831,6 +1865,8 @@ ENTRY(system_call)
1831 callx4 a4 1865 callx4 a4
1832 retw 1866 retw
1833 1867
1868ENDPROC(system_call)
1869
1834 1870
1835/* 1871/*
1836 * Task switch. 1872 * Task switch.
@@ -1899,6 +1935,7 @@ ENTRY(_switch_to)
1899 1935
1900 retw 1936 retw
1901 1937
1938ENDPROC(_switch_to)
1902 1939
1903ENTRY(ret_from_fork) 1940ENTRY(ret_from_fork)
1904 1941
@@ -1914,6 +1951,8 @@ ENTRY(ret_from_fork)
1914 1951
1915 j common_exception_return 1952 j common_exception_return
1916 1953
1954ENDPROC(ret_from_fork)
1955
1917/* 1956/*
1918 * Kernel thread creation helper 1957 * Kernel thread creation helper
1919 * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg 1958 * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index bdc50788f35e..91d9095284de 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -18,6 +18,7 @@
18#include <asm/processor.h> 18#include <asm/processor.h>
19#include <asm/page.h> 19#include <asm/page.h>
20#include <asm/cacheasm.h> 20#include <asm/cacheasm.h>
21#include <asm/initialize_mmu.h>
21 22
22#include <linux/init.h> 23#include <linux/init.h>
23#include <linux/linkage.h> 24#include <linux/linkage.h>
@@ -47,16 +48,19 @@
47 */ 48 */
48 49
49 __HEAD 50 __HEAD
50 .globl _start 51ENTRY(_start)
51_start: _j 2f 52
53 _j 2f
52 .align 4 54 .align 4
531: .word _startup 551: .word _startup
542: l32r a0, 1b 562: l32r a0, 1b
55 jx a0 57 jx a0
56 58
59ENDPROC(_start)
60
57 .section .init.text, "ax" 61 .section .init.text, "ax"
58 .align 4 62
59_startup: 63ENTRY(_startup)
60 64
61 /* Disable interrupts and exceptions. */ 65 /* Disable interrupts and exceptions. */
62 66
@@ -107,7 +111,7 @@ _startup:
107 /* Disable all timers. */ 111 /* Disable all timers. */
108 112
109 .set _index, 0 113 .set _index, 0
110 .rept XCHAL_NUM_TIMERS - 1 114 .rept XCHAL_NUM_TIMERS
111 wsr a0, SREG_CCOMPARE + _index 115 wsr a0, SREG_CCOMPARE + _index
112 .set _index, _index + 1 116 .set _index, _index + 1
113 .endr 117 .endr
@@ -120,7 +124,7 @@ _startup:
120 124
121 /* Disable coprocessors. */ 125 /* Disable coprocessors. */
122 126
123#if XCHAL_CP_NUM > 0 127#if XCHAL_HAVE_CP
124 wsr a0, cpenable 128 wsr a0, cpenable
125#endif 129#endif
126 130
@@ -152,6 +156,8 @@ _startup:
152 156
153 isync 157 isync
154 158
159 initialize_mmu
160
155 /* Unpack data sections 161 /* Unpack data sections
156 * 162 *
157 * The linker script used to build the Linux kernel image 163 * The linker script used to build the Linux kernel image
@@ -230,6 +236,7 @@ _startup:
230should_never_return: 236should_never_return:
231 j should_never_return 237 j should_never_return
232 238
239ENDPROC(_startup)
233 240
234/* 241/*
235 * BSS section 242 * BSS section
@@ -239,6 +246,8 @@ __PAGE_ALIGNED_BSS
239#ifdef CONFIG_MMU 246#ifdef CONFIG_MMU
240ENTRY(swapper_pg_dir) 247ENTRY(swapper_pg_dir)
241 .fill PAGE_SIZE, 1, 0 248 .fill PAGE_SIZE, 1, 0
249END(swapper_pg_dir)
242#endif 250#endif
243ENTRY(empty_zero_page) 251ENTRY(empty_zero_page)
244 .fill PAGE_SIZE, 1, 0 252 .fill PAGE_SIZE, 1, 0
253END(empty_zero_page)
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a6ce3e563739..6f4f9749cff7 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -18,6 +18,8 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/kernel_stat.h> 20#include <linux/kernel_stat.h>
21#include <linux/irqdomain.h>
22#include <linux/of.h>
21 23
22#include <asm/uaccess.h> 24#include <asm/uaccess.h>
23#include <asm/platform.h> 25#include <asm/platform.h>
@@ -26,19 +28,22 @@ static unsigned int cached_irq_mask;
26 28
27atomic_t irq_err_count; 29atomic_t irq_err_count;
28 30
31static struct irq_domain *root_domain;
32
29/* 33/*
30 * do_IRQ handles all normal device IRQ's (the special 34 * do_IRQ handles all normal device IRQ's (the special
31 * SMP cross-CPU interrupts have their own specific 35 * SMP cross-CPU interrupts have their own specific
32 * handlers). 36 * handlers).
33 */ 37 */
34 38
35asmlinkage void do_IRQ(int irq, struct pt_regs *regs) 39asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
36{ 40{
37 struct pt_regs *old_regs = set_irq_regs(regs); 41 struct pt_regs *old_regs = set_irq_regs(regs);
42 int irq = irq_find_mapping(root_domain, hwirq);
38 43
39 if (irq >= NR_IRQS) { 44 if (hwirq >= NR_IRQS) {
40 printk(KERN_EMERG "%s: cannot handle IRQ %d\n", 45 printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
41 __func__, irq); 46 __func__, hwirq);
42 } 47 }
43 48
44 irq_enter(); 49 irq_enter();
@@ -71,40 +76,39 @@ int arch_show_interrupts(struct seq_file *p, int prec)
71 76
72static void xtensa_irq_mask(struct irq_data *d) 77static void xtensa_irq_mask(struct irq_data *d)
73{ 78{
74 cached_irq_mask &= ~(1 << d->irq); 79 cached_irq_mask &= ~(1 << d->hwirq);
75 set_sr (cached_irq_mask, intenable); 80 set_sr (cached_irq_mask, intenable);
76} 81}
77 82
78static void xtensa_irq_unmask(struct irq_data *d) 83static void xtensa_irq_unmask(struct irq_data *d)
79{ 84{
80 cached_irq_mask |= 1 << d->irq; 85 cached_irq_mask |= 1 << d->hwirq;
81 set_sr (cached_irq_mask, intenable); 86 set_sr (cached_irq_mask, intenable);
82} 87}
83 88
84static void xtensa_irq_enable(struct irq_data *d) 89static void xtensa_irq_enable(struct irq_data *d)
85{ 90{
86 variant_irq_enable(d->irq); 91 variant_irq_enable(d->hwirq);
87 xtensa_irq_unmask(d); 92 xtensa_irq_unmask(d);
88} 93}
89 94
90static void xtensa_irq_disable(struct irq_data *d) 95static void xtensa_irq_disable(struct irq_data *d)
91{ 96{
92 xtensa_irq_mask(d); 97 xtensa_irq_mask(d);
93 variant_irq_disable(d->irq); 98 variant_irq_disable(d->hwirq);
94} 99}
95 100
96static void xtensa_irq_ack(struct irq_data *d) 101static void xtensa_irq_ack(struct irq_data *d)
97{ 102{
98 set_sr(1 << d->irq, intclear); 103 set_sr(1 << d->hwirq, intclear);
99} 104}
100 105
101static int xtensa_irq_retrigger(struct irq_data *d) 106static int xtensa_irq_retrigger(struct irq_data *d)
102{ 107{
103 set_sr (1 << d->irq, INTSET); 108 set_sr(1 << d->hwirq, intset);
104 return 1; 109 return 1;
105} 110}
106 111
107
108static struct irq_chip xtensa_irq_chip = { 112static struct irq_chip xtensa_irq_chip = {
109 .name = "xtensa", 113 .name = "xtensa",
110 .irq_enable = xtensa_irq_enable, 114 .irq_enable = xtensa_irq_enable,
@@ -115,37 +119,99 @@ static struct irq_chip xtensa_irq_chip = {
115 .irq_retrigger = xtensa_irq_retrigger, 119 .irq_retrigger = xtensa_irq_retrigger,
116}; 120};
117 121
118void __init init_IRQ(void) 122static int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
123 irq_hw_number_t hw)
119{ 124{
120 int index; 125 u32 mask = 1 << hw;
121 126
122 for (index = 0; index < XTENSA_NR_IRQS; index++) { 127 if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) {
123 int mask = 1 << index; 128 irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
124 129 handle_simple_irq, "level");
125 if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) 130 irq_set_status_flags(irq, IRQ_LEVEL);
126 irq_set_chip_and_handler(index, &xtensa_irq_chip, 131 } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) {
127 handle_simple_irq); 132 irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
133 handle_edge_irq, "edge");
134 irq_clear_status_flags(irq, IRQ_LEVEL);
135 } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) {
136 irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
137 handle_level_irq, "level");
138 irq_set_status_flags(irq, IRQ_LEVEL);
139 } else if (mask & XCHAL_INTTYPE_MASK_TIMER) {
140 irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
141 handle_edge_irq, "edge");
142 irq_clear_status_flags(irq, IRQ_LEVEL);
143 } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
144 /* XCHAL_INTTYPE_MASK_NMI */
145
146 irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
147 handle_level_irq, "level");
148 irq_set_status_flags(irq, IRQ_LEVEL);
149 }
150 return 0;
151}
128 152
129 else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) 153static unsigned map_ext_irq(unsigned ext_irq)
130 irq_set_chip_and_handler(index, &xtensa_irq_chip, 154{
131 handle_edge_irq); 155 unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
156 XCHAL_INTTYPE_MASK_EXTERN_LEVEL;
157 unsigned i;
132 158
133 else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) 159 for (i = 0; mask; ++i, mask >>= 1) {
134 irq_set_chip_and_handler(index, &xtensa_irq_chip, 160 if ((mask & 1) && ext_irq-- == 0)
135 handle_level_irq); 161 return i;
162 }
163 return XCHAL_NUM_INTERRUPTS;
164}
136 165
137 else if (mask & XCHAL_INTTYPE_MASK_TIMER) 166/*
138 irq_set_chip_and_handler(index, &xtensa_irq_chip, 167 * Device Tree IRQ specifier translation function which works with one or
139 handle_edge_irq); 168 * two cell bindings. First cell value maps directly to the hwirq number.
169 * Second cell if present specifies whether hwirq number is external (1) or
170 * internal (0).
171 */
172int xtensa_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
173 const u32 *intspec, unsigned int intsize,
174 unsigned long *out_hwirq, unsigned int *out_type)
175{
176 if (WARN_ON(intsize < 1 || intsize > 2))
177 return -EINVAL;
178 if (intsize == 2 && intspec[1] == 1) {
179 unsigned int_irq = map_ext_irq(intspec[0]);
180 if (int_irq < XCHAL_NUM_INTERRUPTS)
181 *out_hwirq = int_irq;
182 else
183 return -EINVAL;
184 } else {
185 *out_hwirq = intspec[0];
186 }
187 *out_type = IRQ_TYPE_NONE;
188 return 0;
189}
140 190
141 else /* XCHAL_INTTYPE_MASK_WRITE_ERROR */ 191static const struct irq_domain_ops xtensa_irq_domain_ops = {
142 /* XCHAL_INTTYPE_MASK_NMI */ 192 .xlate = xtensa_irq_domain_xlate,
193 .map = xtensa_irq_map,
194};
143 195
144 irq_set_chip_and_handler(index, &xtensa_irq_chip, 196void __init init_IRQ(void)
145 handle_level_irq); 197{
146 } 198 struct device_node *intc = NULL;
147 199
148 cached_irq_mask = 0; 200 cached_irq_mask = 0;
201 set_sr(~0, intclear);
202
203#ifdef CONFIG_OF
204 /* The interrupt controller device node is mandatory */
205 intc = of_find_compatible_node(NULL, NULL, "xtensa,pic");
206 BUG_ON(!intc);
207
208 root_domain = irq_domain_add_linear(intc, NR_IRQS,
209 &xtensa_irq_domain_ops, NULL);
210#else
211 root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
212 &xtensa_irq_domain_ops, NULL);
213#endif
214 irq_set_default_host(root_domain);
149 215
150 variant_init_irq(); 216 variant_init_irq();
151} 217}
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
index 451dda928c93..b715237bae61 100644
--- a/arch/xtensa/kernel/module.c
+++ b/arch/xtensa/kernel/module.c
@@ -53,7 +53,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
53 struct module *mod) 53 struct module *mod)
54{ 54{
55 unsigned int i; 55 unsigned int i;
56 Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; 56 Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
57 Elf32_Sym *sym; 57 Elf32_Sym *sym;
58 unsigned char *location; 58 unsigned char *location;
59 uint32_t value; 59 uint32_t value;
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
index 97230e46cbe7..44bf21c3769a 100644
--- a/arch/xtensa/kernel/platform.c
+++ b/arch/xtensa/kernel/platform.c
@@ -44,4 +44,3 @@ _F(void, calibrate_ccount, (void),
44 ccount_per_jiffy = 10 * (1000000UL/HZ); 44 ccount_per_jiffy = 10 * (1000000UL/HZ);
45}); 45});
46#endif 46#endif
47
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 1accf28da5f5..0dd5784416d3 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -108,7 +108,7 @@ void coprocessor_flush_all(struct thread_info *ti)
108 108
109void cpu_idle(void) 109void cpu_idle(void)
110{ 110{
111 local_irq_enable(); 111 local_irq_enable();
112 112
113 /* endless idle loop with no priority at all */ 113 /* endless idle loop with no priority at all */
114 while (1) { 114 while (1) {
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 33eea4c16f12..61fb2e9e9035 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -154,7 +154,7 @@ int ptrace_setxregs(struct task_struct *child, void __user *uregs)
154 coprocessor_flush_all(ti); 154 coprocessor_flush_all(ti);
155 coprocessor_release_all(ti); 155 coprocessor_release_all(ti);
156 156
157 ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0, 157 ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
158 sizeof(xtregs_coprocessor_t)); 158 sizeof(xtregs_coprocessor_t));
159#endif 159#endif
160 ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt, 160 ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
@@ -343,4 +343,3 @@ void do_syscall_trace_leave(struct pt_regs *regs)
343 && (current->ptrace & PT_PTRACED)) 343 && (current->ptrace & PT_PTRACED))
344 do_syscall_trace(); 344 do_syscall_trace();
345} 345}
346
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index b237988ba6d7..24c1a57abb40 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -22,6 +22,11 @@
22#include <linux/bootmem.h> 22#include <linux/bootmem.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24 24
25#ifdef CONFIG_OF
26#include <linux/of_fdt.h>
27#include <linux/of_platform.h>
28#endif
29
25#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) 30#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
26# include <linux/console.h> 31# include <linux/console.h>
27#endif 32#endif
@@ -42,6 +47,7 @@
42#include <asm/page.h> 47#include <asm/page.h>
43#include <asm/setup.h> 48#include <asm/setup.h>
44#include <asm/param.h> 49#include <asm/param.h>
50#include <asm/traps.h>
45 51
46#include <platform/hardware.h> 52#include <platform/hardware.h>
47 53
@@ -64,6 +70,11 @@ int initrd_is_mapped = 0;
64extern int initrd_below_start_ok; 70extern int initrd_below_start_ok;
65#endif 71#endif
66 72
73#ifdef CONFIG_OF
74extern u32 __dtb_start[];
75void *dtb_start = __dtb_start;
76#endif
77
67unsigned char aux_device_present; 78unsigned char aux_device_present;
68extern unsigned long loops_per_jiffy; 79extern unsigned long loops_per_jiffy;
69 80
@@ -83,6 +94,8 @@ extern void init_mmu(void);
83static inline void init_mmu(void) { } 94static inline void init_mmu(void) { }
84#endif 95#endif
85 96
97extern int mem_reserve(unsigned long, unsigned long, int);
98extern void bootmem_init(void);
86extern void zones_init(void); 99extern void zones_init(void);
87 100
88/* 101/*
@@ -104,28 +117,33 @@ typedef struct tagtable {
104 117
105/* parse current tag */ 118/* parse current tag */
106 119
107static int __init parse_tag_mem(const bp_tag_t *tag) 120static int __init add_sysmem_bank(unsigned long type, unsigned long start,
121 unsigned long end)
108{ 122{
109 meminfo_t *mi = (meminfo_t*)(tag->data);
110
111 if (mi->type != MEMORY_TYPE_CONVENTIONAL)
112 return -1;
113
114 if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) { 123 if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) {
115 printk(KERN_WARNING 124 printk(KERN_WARNING
116 "Ignoring memory bank 0x%08lx size %ldKB\n", 125 "Ignoring memory bank 0x%08lx size %ldKB\n",
117 (unsigned long)mi->start, 126 start, end - start);
118 (unsigned long)mi->end - (unsigned long)mi->start);
119 return -EINVAL; 127 return -EINVAL;
120 } 128 }
121 sysmem.bank[sysmem.nr_banks].type = mi->type; 129 sysmem.bank[sysmem.nr_banks].type = type;
122 sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(mi->start); 130 sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(start);
123 sysmem.bank[sysmem.nr_banks].end = mi->end & PAGE_MASK; 131 sysmem.bank[sysmem.nr_banks].end = end & PAGE_MASK;
124 sysmem.nr_banks++; 132 sysmem.nr_banks++;
125 133
126 return 0; 134 return 0;
127} 135}
128 136
137static int __init parse_tag_mem(const bp_tag_t *tag)
138{
139 meminfo_t *mi = (meminfo_t *)(tag->data);
140
141 if (mi->type != MEMORY_TYPE_CONVENTIONAL)
142 return -1;
143
144 return add_sysmem_bank(mi->type, mi->start, mi->end);
145}
146
129__tagtable(BP_TAG_MEMORY, parse_tag_mem); 147__tagtable(BP_TAG_MEMORY, parse_tag_mem);
130 148
131#ifdef CONFIG_BLK_DEV_INITRD 149#ifdef CONFIG_BLK_DEV_INITRD
@@ -142,12 +160,31 @@ static int __init parse_tag_initrd(const bp_tag_t* tag)
142 160
143__tagtable(BP_TAG_INITRD, parse_tag_initrd); 161__tagtable(BP_TAG_INITRD, parse_tag_initrd);
144 162
163#ifdef CONFIG_OF
164
165static int __init parse_tag_fdt(const bp_tag_t *tag)
166{
167 dtb_start = (void *)(tag->data[0]);
168 return 0;
169}
170
171__tagtable(BP_TAG_FDT, parse_tag_fdt);
172
173void __init early_init_dt_setup_initrd_arch(unsigned long start,
174 unsigned long end)
175{
176 initrd_start = (void *)__va(start);
177 initrd_end = (void *)__va(end);
178 initrd_below_start_ok = 1;
179}
180
181#endif /* CONFIG_OF */
182
145#endif /* CONFIG_BLK_DEV_INITRD */ 183#endif /* CONFIG_BLK_DEV_INITRD */
146 184
147static int __init parse_tag_cmdline(const bp_tag_t* tag) 185static int __init parse_tag_cmdline(const bp_tag_t* tag)
148{ 186{
149 strncpy(command_line, (char*)(tag->data), COMMAND_LINE_SIZE); 187 strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
150 command_line[COMMAND_LINE_SIZE - 1] = '\0';
151 return 0; 188 return 0;
152} 189}
153 190
@@ -185,6 +222,58 @@ static int __init parse_bootparam(const bp_tag_t* tag)
185 return 0; 222 return 0;
186} 223}
187 224
225#ifdef CONFIG_OF
226
227void __init early_init_dt_add_memory_arch(u64 base, u64 size)
228{
229 size &= PAGE_MASK;
230 add_sysmem_bank(MEMORY_TYPE_CONVENTIONAL, base, base + size);
231}
232
233void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
234{
235 return __alloc_bootmem(size, align, 0);
236}
237
238void __init early_init_devtree(void *params)
239{
240 /* Setup flat device-tree pointer */
241 initial_boot_params = params;
242
243 /* Retrieve various informations from the /chosen node of the
244 * device-tree, including the platform type, initrd location and
245 * size, TCE reserve, and more ...
246 */
247 if (!command_line[0])
248 of_scan_flat_dt(early_init_dt_scan_chosen, command_line);
249
250 /* Scan memory nodes and rebuild MEMBLOCKs */
251 of_scan_flat_dt(early_init_dt_scan_root, NULL);
252 if (sysmem.nr_banks == 0)
253 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
254}
255
256static void __init copy_devtree(void)
257{
258 void *alloc = early_init_dt_alloc_memory_arch(
259 be32_to_cpu(initial_boot_params->totalsize), 0);
260 if (alloc) {
261 memcpy(alloc, initial_boot_params,
262 be32_to_cpu(initial_boot_params->totalsize));
263 initial_boot_params = alloc;
264 }
265}
266
267static int __init xtensa_device_probe(void)
268{
269 of_platform_populate(NULL, NULL, NULL, NULL);
270 return 0;
271}
272
273device_initcall(xtensa_device_probe);
274
275#endif /* CONFIG_OF */
276
188/* 277/*
189 * Initialize architecture. (Early stage) 278 * Initialize architecture. (Early stage)
190 */ 279 */
@@ -193,14 +282,14 @@ void __init init_arch(bp_tag_t *bp_start)
193{ 282{
194 sysmem.nr_banks = 0; 283 sysmem.nr_banks = 0;
195 284
196#ifdef CONFIG_CMDLINE_BOOL
197 strcpy(command_line, default_command_line);
198#endif
199
200 /* Parse boot parameters */ 285 /* Parse boot parameters */
201 286
202 if (bp_start) 287 if (bp_start)
203 parse_bootparam(bp_start); 288 parse_bootparam(bp_start);
289
290#ifdef CONFIG_OF
291 early_init_devtree(dtb_start);
292#endif
204 293
205 if (sysmem.nr_banks == 0) { 294 if (sysmem.nr_banks == 0) {
206 sysmem.nr_banks = 1; 295 sysmem.nr_banks = 1;
@@ -209,6 +298,11 @@ void __init init_arch(bp_tag_t *bp_start)
209 + PLATFORM_DEFAULT_MEM_SIZE; 298 + PLATFORM_DEFAULT_MEM_SIZE;
210 } 299 }
211 300
301#ifdef CONFIG_CMDLINE_BOOL
302 if (!command_line[0])
303 strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
304#endif
305
212 /* Early hook for platforms */ 306 /* Early hook for platforms */
213 307
214 platform_init(bp_start); 308 platform_init(bp_start);
@@ -235,15 +329,130 @@ extern char _UserExceptionVector_text_end;
235extern char _DoubleExceptionVector_literal_start; 329extern char _DoubleExceptionVector_literal_start;
236extern char _DoubleExceptionVector_text_end; 330extern char _DoubleExceptionVector_text_end;
237 331
238void __init setup_arch(char **cmdline_p) 332
333#ifdef CONFIG_S32C1I_SELFTEST
334#if XCHAL_HAVE_S32C1I
335
336static int __initdata rcw_word, rcw_probe_pc, rcw_exc;
337
338/*
339 * Basic atomic compare-and-swap, that records PC of S32C1I for probing.
340 *
341 * If *v == cmp, set *v = set. Return previous *v.
342 */
343static inline int probed_compare_swap(int *v, int cmp, int set)
344{
345 int tmp;
346
347 __asm__ __volatile__(
348 " movi %1, 1f\n"
349 " s32i %1, %4, 0\n"
350 " wsr %2, scompare1\n"
351 "1: s32c1i %0, %3, 0\n"
352 : "=a" (set), "=&a" (tmp)
353 : "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set)
354 : "memory"
355 );
356 return set;
357}
358
359/* Handle probed exception */
360
361void __init do_probed_exception(struct pt_regs *regs, unsigned long exccause)
362{
363 if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
364 regs->pc += 3; /* skip the s32c1i instruction */
365 rcw_exc = exccause;
366 } else {
367 do_unhandled(regs, exccause);
368 }
369}
370
371/* Simple test of S32C1I (soc bringup assist) */
372
373void __init check_s32c1i(void)
374{
375 int n, cause1, cause2;
376 void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
377
378 rcw_probe_pc = 0;
379 handbus = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
380 do_probed_exception);
381 handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR,
382 do_probed_exception);
383 handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR,
384 do_probed_exception);
385
386 /* First try an S32C1I that does not store: */
387 rcw_exc = 0;
388 rcw_word = 1;
389 n = probed_compare_swap(&rcw_word, 0, 2);
390 cause1 = rcw_exc;
391
392 /* took exception? */
393 if (cause1 != 0) {
394 /* unclean exception? */
395 if (n != 2 || rcw_word != 1)
396 panic("S32C1I exception error");
397 } else if (rcw_word != 1 || n != 1) {
398 panic("S32C1I compare error");
399 }
400
401 /* Then an S32C1I that stores: */
402 rcw_exc = 0;
403 rcw_word = 0x1234567;
404 n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde);
405 cause2 = rcw_exc;
406
407 if (cause2 != 0) {
408 /* unclean exception? */
409 if (n != 0xabcde || rcw_word != 0x1234567)
410 panic("S32C1I exception error (b)");
411 } else if (rcw_word != 0xabcde || n != 0x1234567) {
412 panic("S32C1I store error");
413 }
414
415 /* Verify consistency of exceptions: */
416 if (cause1 || cause2) {
417 pr_warn("S32C1I took exception %d, %d\n", cause1, cause2);
418 /* If emulation of S32C1I upon bus error gets implemented,
419 we can get rid of this panic for single core (not SMP) */
420 panic("S32C1I exceptions not currently supported");
421 }
422 if (cause1 != cause2)
423 panic("inconsistent S32C1I exceptions");
424
425 trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
426 trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
427 trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
428}
429
430#else /* XCHAL_HAVE_S32C1I */
431
432/* This condition should not occur with a commercially deployed processor.
433 Display reminder for early engr test or demo chips / FPGA bitstreams */
434void __init check_s32c1i(void)
435{
436 pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
437}
438
439#endif /* XCHAL_HAVE_S32C1I */
440#else /* CONFIG_S32C1I_SELFTEST */
441
442void __init check_s32c1i(void)
239{ 443{
240 extern int mem_reserve(unsigned long, unsigned long, int); 444}
241 extern void bootmem_init(void); 445
446#endif /* CONFIG_S32C1I_SELFTEST */
242 447
243 memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); 448
244 boot_command_line[COMMAND_LINE_SIZE-1] = '\0'; 449void __init setup_arch(char **cmdline_p)
450{
451 strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
245 *cmdline_p = command_line; 452 *cmdline_p = command_line;
246 453
454 check_s32c1i();
455
247 /* Reserve some memory regions */ 456 /* Reserve some memory regions */
248 457
249#ifdef CONFIG_BLK_DEV_INITRD 458#ifdef CONFIG_BLK_DEV_INITRD
@@ -251,7 +460,7 @@ void __init setup_arch(char **cmdline_p)
251 initrd_is_mapped = mem_reserve(__pa(initrd_start), 460 initrd_is_mapped = mem_reserve(__pa(initrd_start),
252 __pa(initrd_end), 0); 461 __pa(initrd_end), 0);
253 initrd_below_start_ok = 1; 462 initrd_below_start_ok = 1;
254 } else { 463 } else {
255 initrd_start = 0; 464 initrd_start = 0;
256 } 465 }
257#endif 466#endif
@@ -275,8 +484,12 @@ void __init setup_arch(char **cmdline_p)
275 484
276 bootmem_init(); 485 bootmem_init();
277 486
278 platform_setup(cmdline_p); 487#ifdef CONFIG_OF
488 copy_devtree();
489 unflatten_device_tree();
490#endif
279 491
492 platform_setup(cmdline_p);
280 493
281 paging_init(); 494 paging_init();
282 zones_init(); 495 zones_init();
@@ -326,7 +539,7 @@ c_show(struct seq_file *f, void *slot)
326 "core ID\t\t: " XCHAL_CORE_ID "\n" 539 "core ID\t\t: " XCHAL_CORE_ID "\n"
327 "build ID\t: 0x%x\n" 540 "build ID\t: 0x%x\n"
328 "byte order\t: %s\n" 541 "byte order\t: %s\n"
329 "cpu MHz\t\t: %lu.%02lu\n" 542 "cpu MHz\t\t: %lu.%02lu\n"
330 "bogomips\t: %lu.%02lu\n", 543 "bogomips\t: %lu.%02lu\n",
331 XCHAL_BUILD_UNIQUE_ID, 544 XCHAL_BUILD_UNIQUE_ID,
332 XCHAL_HAVE_BE ? "big" : "little", 545 XCHAL_HAVE_BE ? "big" : "little",
@@ -381,6 +594,9 @@ c_show(struct seq_file *f, void *slot)
381#if XCHAL_HAVE_FP 594#if XCHAL_HAVE_FP
382 "fpu " 595 "fpu "
383#endif 596#endif
597#if XCHAL_HAVE_S32C1I
598 "s32c1i "
599#endif
384 "\n"); 600 "\n");
385 601
386 /* Registers. */ 602 /* Registers. */
@@ -412,7 +628,7 @@ c_show(struct seq_file *f, void *slot)
412 "icache size\t: %d\n" 628 "icache size\t: %d\n"
413 "icache flags\t: " 629 "icache flags\t: "
414#if XCHAL_ICACHE_LINE_LOCKABLE 630#if XCHAL_ICACHE_LINE_LOCKABLE
415 "lock" 631 "lock "
416#endif 632#endif
417 "\n" 633 "\n"
418 "dcache line size: %d\n" 634 "dcache line size: %d\n"
@@ -420,10 +636,10 @@ c_show(struct seq_file *f, void *slot)
420 "dcache size\t: %d\n" 636 "dcache size\t: %d\n"
421 "dcache flags\t: " 637 "dcache flags\t: "
422#if XCHAL_DCACHE_IS_WRITEBACK 638#if XCHAL_DCACHE_IS_WRITEBACK
423 "writeback" 639 "writeback "
424#endif 640#endif
425#if XCHAL_DCACHE_LINE_LOCKABLE 641#if XCHAL_DCACHE_LINE_LOCKABLE
426 "lock" 642 "lock "
427#endif 643#endif
428 "\n", 644 "\n",
429 XCHAL_ICACHE_LINESIZE, 645 XCHAL_ICACHE_LINESIZE,
@@ -465,4 +681,3 @@ const struct seq_operations cpuinfo_op =
465}; 681};
466 682
467#endif /* CONFIG_PROC_FS */ 683#endif /* CONFIG_PROC_FS */
468
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 63c566f627bc..de34d6be91cd 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -212,7 +212,7 @@ restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
212 if (err) 212 if (err)
213 return err; 213 return err;
214 214
215 /* The signal handler may have used coprocessors in which 215 /* The signal handler may have used coprocessors in which
216 * case they are still enabled. We disable them to force a 216 * case they are still enabled. We disable them to force a
217 * reloading of the original task's CP state by the lazy 217 * reloading of the original task's CP state by the lazy
218 * context-switching mechanisms of CP exception handling. 218 * context-switching mechanisms of CP exception handling.
@@ -396,7 +396,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
396 */ 396 */
397 397
398 /* Set up registers for signal handler */ 398 /* Set up registers for signal handler */
399 start_thread(regs, (unsigned long) ka->sa.sa_handler, 399 start_thread(regs, (unsigned long) ka->sa.sa_handler,
400 (unsigned long) frame); 400 (unsigned long) frame);
401 401
402 /* Set up a stack frame for a call4 402 /* Set up a stack frame for a call4
@@ -424,9 +424,9 @@ give_sigsegv:
424 return -EFAULT; 424 return -EFAULT;
425} 425}
426 426
427asmlinkage long xtensa_sigaltstack(const stack_t __user *uss, 427asmlinkage long xtensa_sigaltstack(const stack_t __user *uss,
428 stack_t __user *uoss, 428 stack_t __user *uoss,
429 long a2, long a3, long a4, long a5, 429 long a2, long a3, long a4, long a5,
430 struct pt_regs *regs) 430 struct pt_regs *regs)
431{ 431{
432 return do_sigaltstack(uss, uoss, regs->areg[1]); 432 return do_sigaltstack(uss, uoss, regs->areg[1]);
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 5702065f472a..54fa8425cee2 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -52,4 +52,3 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
52{ 52{
53 return sys_fadvise64_64(fd, offset, len, advice); 53 return sys_fadvise64_64(fd, offset, len, advice);
54} 54}
55
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index ac62f9cf1e10..ffb474104311 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -22,6 +22,7 @@
22#include <linux/irq.h> 22#include <linux/irq.h>
23#include <linux/profile.h> 23#include <linux/profile.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/irqdomain.h>
25 26
26#include <asm/timex.h> 27#include <asm/timex.h>
27#include <asm/platform.h> 28#include <asm/platform.h>
@@ -31,7 +32,7 @@ unsigned long ccount_per_jiffy; /* per 1/HZ */
31unsigned long nsec_per_ccount; /* nsec per ccount increment */ 32unsigned long nsec_per_ccount; /* nsec per ccount increment */
32#endif 33#endif
33 34
34static cycle_t ccount_read(void) 35static cycle_t ccount_read(struct clocksource *cs)
35{ 36{
36 return (cycle_t)get_ccount(); 37 return (cycle_t)get_ccount();
37} 38}
@@ -52,6 +53,7 @@ static struct irqaction timer_irqaction = {
52 53
53void __init time_init(void) 54void __init time_init(void)
54{ 55{
56 unsigned int irq;
55#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT 57#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
56 printk("Calibrating CPU frequency "); 58 printk("Calibrating CPU frequency ");
57 platform_calibrate_ccount(); 59 platform_calibrate_ccount();
@@ -62,7 +64,8 @@ void __init time_init(void)
62 64
63 /* Initialize the linux timer interrupt. */ 65 /* Initialize the linux timer interrupt. */
64 66
65 setup_irq(LINUX_TIMER_INT, &timer_irqaction); 67 irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
68 setup_irq(irq, &timer_irqaction);
66 set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY); 69 set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
67} 70}
68 71
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 5caf2b64d43a..01e0111bf787 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -293,6 +293,17 @@ do_debug(struct pt_regs *regs)
293} 293}
294 294
295 295
296/* Set exception C handler - for temporary use when probing exceptions */
297
298void * __init trap_set_handler(int cause, void *handler)
299{
300 unsigned long *entry = &exc_table[EXC_TABLE_DEFAULT / 4 + cause];
301 void *previous = (void *)*entry;
302 *entry = (unsigned long)handler;
303 return previous;
304}
305
306
296/* 307/*
297 * Initialize dispatch tables. 308 * Initialize dispatch tables.
298 * 309 *
@@ -397,7 +408,8 @@ static inline void spill_registers(void)
397 "wsr a13, sar\n\t" 408 "wsr a13, sar\n\t"
398 "wsr a14, ps\n\t" 409 "wsr a14, ps\n\t"
399 :: "a" (&a0), "a" (&ps) 410 :: "a" (&a0), "a" (&ps)
400 : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory"); 411 : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
412 "memory");
401} 413}
402 414
403void show_trace(struct task_struct *task, unsigned long *sp) 415void show_trace(struct task_struct *task, unsigned long *sp)
@@ -452,7 +464,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
452 464
453 if (!sp) 465 if (!sp)
454 sp = stack_pointer(task); 466 sp = stack_pointer(task);
455 stack = sp; 467 stack = sp;
456 468
457 printk("\nStack: "); 469 printk("\nStack: ");
458 470
@@ -523,5 +535,3 @@ void die(const char * str, struct pt_regs * regs, long err)
523 535
524 do_exit(err); 536 do_exit(err);
525} 537}
526
527
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 4462c1e595c2..68df35f66ce3 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -79,6 +79,8 @@ ENTRY(_UserExceptionVector)
79 l32i a0, a0, EXC_TABLE_FAST_USER # load handler 79 l32i a0, a0, EXC_TABLE_FAST_USER # load handler
80 jx a0 80 jx a0
81 81
82ENDPROC(_UserExceptionVector)
83
82/* 84/*
83 * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0) 85 * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
84 * 86 *
@@ -103,6 +105,7 @@ ENTRY(_KernelExceptionVector)
103 l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address 105 l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address
104 jx a0 106 jx a0
105 107
108ENDPROC(_KernelExceptionVector)
106 109
107/* 110/*
108 * Double exception vector (Exceptions with PS.EXCM == 1) 111 * Double exception vector (Exceptions with PS.EXCM == 1)
@@ -225,7 +228,13 @@ ENTRY(_DoubleExceptionVector)
225 /* Window overflow/underflow exception. Get stack pointer. */ 228 /* Window overflow/underflow exception. Get stack pointer. */
226 229
227 mov a3, a2 230 mov a3, a2
228 movi a2, exc_table 231 /* This explicit literal and the following references to it are made
232 * in order to fit DoubleExceptionVector.literals into the available
233 * 16-byte gap before DoubleExceptionVector.text in the absence of
234 * link time relaxation. See kernel/vmlinux.lds.S
235 */
236 .literal .Lexc_table, exc_table
237 l32r a2, .Lexc_table
229 l32i a2, a2, EXC_TABLE_KSTK 238 l32i a2, a2, EXC_TABLE_KSTK
230 239
231 /* Check for overflow/underflow exception, jump if overflow. */ 240 /* Check for overflow/underflow exception, jump if overflow. */
@@ -255,7 +264,7 @@ ENTRY(_DoubleExceptionVector)
255 s32i a0, a2, PT_AREG0 264 s32i a0, a2, PT_AREG0
256 265
257 wsr a3, excsave1 # save a3 266 wsr a3, excsave1 # save a3
258 movi a3, exc_table 267 l32r a3, .Lexc_table
259 268
260 rsr a0, exccause 269 rsr a0, exccause
261 s32i a0, a2, PT_DEPC # mark it as a regular exception 270 s32i a0, a2, PT_DEPC # mark it as a regular exception
@@ -267,7 +276,7 @@ ENTRY(_DoubleExceptionVector)
267 276
268 /* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */ 277 /* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */
269 278
270 movi a3, exc_table 279 l32r a3, .Lexc_table
271 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE # temporary variable 280 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE # temporary variable
272 281
273 /* Enter critical section. */ 282 /* Enter critical section. */
@@ -296,7 +305,7 @@ ENTRY(_DoubleExceptionVector)
296 305
297 /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */ 306 /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */
298 307
299 movi a3, exc_table 308 l32r a3, .Lexc_table
300 rsr a0, exccause 309 rsr a0, exccause
301 addx4 a0, a0, a3 310 addx4 a0, a0, a3
302 l32i a0, a0, EXC_TABLE_FAST_USER 311 l32i a0, a0, EXC_TABLE_FAST_USER
@@ -338,6 +347,7 @@ ENTRY(_DoubleExceptionVector)
338 347
339 .end literal_prefix 348 .end literal_prefix
340 349
350ENDPROC(_DoubleExceptionVector)
341 351
342/* 352/*
343 * Debug interrupt vector 353 * Debug interrupt vector
@@ -349,9 +359,11 @@ ENTRY(_DoubleExceptionVector)
349 .section .DebugInterruptVector.text, "ax" 359 .section .DebugInterruptVector.text, "ax"
350 360
351ENTRY(_DebugInterruptVector) 361ENTRY(_DebugInterruptVector)
362
352 xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL 363 xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
353 jx a0 364 jx a0
354 365
366ENDPROC(_DebugInterruptVector)
355 367
356 368
357/* Window overflow and underflow handlers. 369/* Window overflow and underflow handlers.
@@ -363,38 +375,43 @@ ENTRY(_DebugInterruptVector)
363 * we try to access any page that would cause a page fault early. 375 * we try to access any page that would cause a page fault early.
364 */ 376 */
365 377
378#define ENTRY_ALIGN64(name) \
379 .globl name; \
380 .align 64; \
381 name:
382
366 .section .WindowVectors.text, "ax" 383 .section .WindowVectors.text, "ax"
367 384
368 385
369/* 4-Register Window Overflow Vector (Handler) */ 386/* 4-Register Window Overflow Vector (Handler) */
370 387
371 .align 64 388ENTRY_ALIGN64(_WindowOverflow4)
372.global _WindowOverflow4 389
373_WindowOverflow4:
374 s32e a0, a5, -16 390 s32e a0, a5, -16
375 s32e a1, a5, -12 391 s32e a1, a5, -12
376 s32e a2, a5, -8 392 s32e a2, a5, -8
377 s32e a3, a5, -4 393 s32e a3, a5, -4
378 rfwo 394 rfwo
379 395
396ENDPROC(_WindowOverflow4)
397
380 398
381/* 4-Register Window Underflow Vector (Handler) */ 399/* 4-Register Window Underflow Vector (Handler) */
382 400
383 .align 64 401ENTRY_ALIGN64(_WindowUnderflow4)
384.global _WindowUnderflow4 402
385_WindowUnderflow4:
386 l32e a0, a5, -16 403 l32e a0, a5, -16
387 l32e a1, a5, -12 404 l32e a1, a5, -12
388 l32e a2, a5, -8 405 l32e a2, a5, -8
389 l32e a3, a5, -4 406 l32e a3, a5, -4
390 rfwu 407 rfwu
391 408
409ENDPROC(_WindowUnderflow4)
392 410
393/* 8-Register Window Overflow Vector (Handler) */ 411/* 8-Register Window Overflow Vector (Handler) */
394 412
395 .align 64 413ENTRY_ALIGN64(_WindowOverflow8)
396.global _WindowOverflow8 414
397_WindowOverflow8:
398 s32e a0, a9, -16 415 s32e a0, a9, -16
399 l32e a0, a1, -12 416 l32e a0, a1, -12
400 s32e a2, a9, -8 417 s32e a2, a9, -8
@@ -406,11 +423,12 @@ _WindowOverflow8:
406 s32e a7, a0, -20 423 s32e a7, a0, -20
407 rfwo 424 rfwo
408 425
426ENDPROC(_WindowOverflow8)
427
409/* 8-Register Window Underflow Vector (Handler) */ 428/* 8-Register Window Underflow Vector (Handler) */
410 429
411 .align 64 430ENTRY_ALIGN64(_WindowUnderflow8)
412.global _WindowUnderflow8 431
413_WindowUnderflow8:
414 l32e a1, a9, -12 432 l32e a1, a9, -12
415 l32e a0, a9, -16 433 l32e a0, a9, -16
416 l32e a7, a1, -12 434 l32e a7, a1, -12
@@ -422,12 +440,12 @@ _WindowUnderflow8:
422 l32e a7, a7, -20 440 l32e a7, a7, -20
423 rfwu 441 rfwu
424 442
443ENDPROC(_WindowUnderflow8)
425 444
426/* 12-Register Window Overflow Vector (Handler) */ 445/* 12-Register Window Overflow Vector (Handler) */
427 446
428 .align 64 447ENTRY_ALIGN64(_WindowOverflow12)
429.global _WindowOverflow12 448
430_WindowOverflow12:
431 s32e a0, a13, -16 449 s32e a0, a13, -16
432 l32e a0, a1, -12 450 l32e a0, a1, -12
433 s32e a1, a13, -12 451 s32e a1, a13, -12
@@ -443,11 +461,12 @@ _WindowOverflow12:
443 s32e a11, a0, -20 461 s32e a11, a0, -20
444 rfwo 462 rfwo
445 463
464ENDPROC(_WindowOverflow12)
465
446/* 12-Register Window Underflow Vector (Handler) */ 466/* 12-Register Window Underflow Vector (Handler) */
447 467
448 .align 64 468ENTRY_ALIGN64(_WindowUnderflow12)
449.global _WindowUnderflow12 469
450_WindowUnderflow12:
451 l32e a1, a13, -12 470 l32e a1, a13, -12
452 l32e a0, a13, -16 471 l32e a0, a13, -16
453 l32e a11, a1, -12 472 l32e a11, a1, -12
@@ -463,6 +482,6 @@ _WindowUnderflow12:
463 l32e a11, a11, -20 482 l32e a11, a11, -20
464 rfwu 483 rfwu
465 484
466 .text 485ENDPROC(_WindowUnderflow12)
467
468 486
487 .text
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
index df397f932d0e..4eb573d2720e 100644
--- a/arch/xtensa/lib/checksum.S
+++ b/arch/xtensa/lib/checksum.S
@@ -41,10 +41,11 @@
41 41
42.text 42.text
43ENTRY(csum_partial) 43ENTRY(csum_partial)
44 /* 44
45 * Experiments with Ethernet and SLIP connections show that buf 45 /*
46 * is aligned on either a 2-byte or 4-byte boundary. 46 * Experiments with Ethernet and SLIP connections show that buf
47 */ 47 * is aligned on either a 2-byte or 4-byte boundary.
48 */
48 entry sp, 32 49 entry sp, 32
49 extui a5, a2, 0, 2 50 extui a5, a2, 0, 2
50 bnez a5, 8f /* branch if 2-byte aligned */ 51 bnez a5, 8f /* branch if 2-byte aligned */
@@ -170,7 +171,7 @@ ENTRY(csum_partial)
1703: 1713:
171 j 5b /* branch to handle the remaining byte */ 172 j 5b /* branch to handle the remaining byte */
172 173
173 174ENDPROC(csum_partial)
174 175
175/* 176/*
176 * Copy from ds while checksumming, otherwise like csum_partial 177 * Copy from ds while checksumming, otherwise like csum_partial
@@ -211,6 +212,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
211 */ 212 */
212 213
213ENTRY(csum_partial_copy_generic) 214ENTRY(csum_partial_copy_generic)
215
214 entry sp, 32 216 entry sp, 32
215 mov a12, a3 217 mov a12, a3
216 mov a11, a4 218 mov a11, a4
@@ -367,6 +369,8 @@ DST( s8i a8, a3, 1 )
3676: 3696:
368 j 4b /* process the possible trailing odd byte */ 370 j 4b /* process the possible trailing odd byte */
369 371
372ENDPROC(csum_partial_copy_generic)
373
370 374
371# Exception handler: 375# Exception handler:
372.section .fixup, "ax" 376.section .fixup, "ax"
@@ -406,4 +410,3 @@ DST( s8i a8, a3, 1 )
406 retw 410 retw
407 411
408.previous 412.previous
409
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
index c48b80acb5f0..b1c219acabe7 100644
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S
@@ -210,8 +210,10 @@ memcpy:
210 _beqz a4, .Ldone # avoid loading anything for zero-length copies 210 _beqz a4, .Ldone # avoid loading anything for zero-length copies
211 # copy 16 bytes per iteration for word-aligned dst and unaligned src 211 # copy 16 bytes per iteration for word-aligned dst and unaligned src
212 ssa8 a3 # set shift amount from byte offset 212 ssa8 a3 # set shift amount from byte offset
213#define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS (simulator) with the 213
214 lint or ferret client, or 0 to save a few cycles */ 214/* set to 1 when running on ISS (simulator) with the
215 lint or ferret client, or 0 to save a few cycles */
216#define SIM_CHECKS_ALIGNMENT 1
215#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT 217#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
216 and a11, a3, a8 # save unalignment offset for below 218 and a11, a3, a8 # save unalignment offset for below
217 sub a3, a3, a11 # align a3 219 sub a3, a3, a11 # align a3
diff --git a/arch/xtensa/lib/pci-auto.c b/arch/xtensa/lib/pci-auto.c
index a71733ae1193..34d05abbd921 100644
--- a/arch/xtensa/lib/pci-auto.c
+++ b/arch/xtensa/lib/pci-auto.c
@@ -241,8 +241,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
241 unsigned char header_type; 241 unsigned char header_type;
242 struct pci_dev *dev = &pciauto_dev; 242 struct pci_dev *dev = &pciauto_dev;
243 243
244 pciauto_dev.bus = &pciauto_bus; 244 pciauto_dev.bus = &pciauto_bus;
245 pciauto_dev.sysdata = pci_ctrl; 245 pciauto_dev.sysdata = pci_ctrl;
246 pciauto_bus.ops = pci_ctrl->ops; 246 pciauto_bus.ops = pci_ctrl->ops;
247 247
248 /* 248 /*
@@ -345,8 +345,3 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
345 } 345 }
346 return sub_bus; 346 return sub_bus;
347} 347}
348
349
350
351
352
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
index 9f603cdaaa68..1ad0ecf45368 100644
--- a/arch/xtensa/lib/strncpy_user.S
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -166,7 +166,7 @@ __strncpy_user:
166 retw 166 retw
167.Lz1: # byte 1 is zero 167.Lz1: # byte 1 is zero
168#ifdef __XTENSA_EB__ 168#ifdef __XTENSA_EB__
169 extui a9, a9, 16, 16 169 extui a9, a9, 16, 16
170#endif /* __XTENSA_EB__ */ 170#endif /* __XTENSA_EB__ */
171 EX(s16i, a9, a11, 0, fixup_s) 171 EX(s16i, a9, a11, 0, fixup_s)
172 addi a11, a11, 1 # advance dst pointer 172 addi a11, a11, 1 # advance dst pointer
@@ -174,7 +174,7 @@ __strncpy_user:
174 retw 174 retw
175.Lz2: # byte 2 is zero 175.Lz2: # byte 2 is zero
176#ifdef __XTENSA_EB__ 176#ifdef __XTENSA_EB__
177 extui a9, a9, 16, 16 177 extui a9, a9, 16, 16
178#endif /* __XTENSA_EB__ */ 178#endif /* __XTENSA_EB__ */
179 EX(s16i, a9, a11, 0, fixup_s) 179 EX(s16i, a9, a11, 0, fixup_s)
180 movi a9, 0 180 movi a9, 0
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
index 23f2a89816a1..4c03b1e581e9 100644
--- a/arch/xtensa/lib/strnlen_user.S
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -145,4 +145,3 @@ __strnlen_user:
145lenfixup: 145lenfixup:
146 movi a2, 0 146 movi a2, 0
147 retw 147 retw
148
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index 46d60314bb16..ace1892a875e 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -318,4 +318,3 @@ l_fixup:
318 /* Ignore memset return value in a6. */ 318 /* Ignore memset return value in a6. */
319 /* a2 still contains bytes not copied. */ 319 /* a2 still contains bytes not copied. */
320 retw 320 retw
321
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 85df4655d326..81edeab82d17 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -118,7 +118,7 @@ void flush_dcache_page(struct page *page)
118 * For now, flush the whole cache. FIXME?? 118 * For now, flush the whole cache. FIXME??
119 */ 119 */
120 120
121void flush_cache_range(struct vm_area_struct* vma, 121void flush_cache_range(struct vm_area_struct* vma,
122 unsigned long start, unsigned long end) 122 unsigned long start, unsigned long end)
123{ 123{
124 __flush_invalidate_dcache_all(); 124 __flush_invalidate_dcache_all();
@@ -133,7 +133,7 @@ void flush_cache_range(struct vm_area_struct* vma,
133 */ 133 */
134 134
135void flush_cache_page(struct vm_area_struct* vma, unsigned long address, 135void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
136 unsigned long pfn) 136 unsigned long pfn)
137{ 137{
138 /* Note that we have to use the 'alias' address to avoid multi-hit */ 138 /* Note that we have to use the 'alias' address to avoid multi-hit */
139 139
@@ -166,14 +166,14 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
166 166
167 if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { 167 if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
168 168
169 unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
170 unsigned long paddr = (unsigned long) page_address(page); 169 unsigned long paddr = (unsigned long) page_address(page);
171 unsigned long phys = page_to_phys(page); 170 unsigned long phys = page_to_phys(page);
171 unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
172 172
173 __flush_invalidate_dcache_page(paddr); 173 __flush_invalidate_dcache_page(paddr);
174 174
175 __flush_invalidate_dcache_page_alias(vaddr, phys); 175 __flush_invalidate_dcache_page_alias(tmp, phys);
176 __invalidate_icache_page_alias(vaddr, phys); 176 __invalidate_icache_page_alias(tmp, phys);
177 177
178 clear_bit(PG_arch_1, &page->flags); 178 clear_bit(PG_arch_1, &page->flags);
179 } 179 }
@@ -195,7 +195,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
195 195
196#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 196#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
197 197
198void copy_to_user_page(struct vm_area_struct *vma, struct page *page, 198void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
199 unsigned long vaddr, void *dst, const void *src, 199 unsigned long vaddr, void *dst, const void *src,
200 unsigned long len) 200 unsigned long len)
201{ 201{
@@ -205,8 +205,8 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
205 /* Flush and invalidate user page if aliased. */ 205 /* Flush and invalidate user page if aliased. */
206 206
207 if (alias) { 207 if (alias) {
208 unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); 208 unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
209 __flush_invalidate_dcache_page_alias(temp, phys); 209 __flush_invalidate_dcache_page_alias(t, phys);
210 } 210 }
211 211
212 /* Copy data */ 212 /* Copy data */
@@ -219,12 +219,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
219 */ 219 */
220 220
221 if (alias) { 221 if (alias) {
222 unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); 222 unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
223 223
224 __flush_invalidate_dcache_range((unsigned long) dst, len); 224 __flush_invalidate_dcache_range((unsigned long) dst, len);
225 if ((vma->vm_flags & VM_EXEC) != 0) { 225 if ((vma->vm_flags & VM_EXEC) != 0)
226 __invalidate_icache_page_alias(temp, phys); 226 __invalidate_icache_page_alias(t, phys);
227 }
228 227
229 } else if ((vma->vm_flags & VM_EXEC) != 0) { 228 } else if ((vma->vm_flags & VM_EXEC) != 0) {
230 __flush_dcache_range((unsigned long)dst,len); 229 __flush_dcache_range((unsigned long)dst,len);
@@ -245,8 +244,8 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
245 */ 244 */
246 245
247 if (alias) { 246 if (alias) {
248 unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); 247 unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
249 __flush_invalidate_dcache_page_alias(temp, phys); 248 __flush_invalidate_dcache_page_alias(t, phys);
250 } 249 }
251 250
252 memcpy(dst, src, len); 251 memcpy(dst, src, len);
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 245b08f7eaf4..4b7bc8db170f 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -254,4 +254,3 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
254 die("Oops", regs, sig); 254 die("Oops", regs, sig);
255 do_exit(sig); 255 do_exit(sig);
256} 256}
257
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index db955179da2d..7a5156ffebb6 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -75,15 +75,15 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
75 sysmem.nr_banks++; 75 sysmem.nr_banks++;
76 } 76 }
77 sysmem.bank[i].end = start; 77 sysmem.bank[i].end = start;
78
79 } else if (end < sysmem.bank[i].end) {
80 sysmem.bank[i].start = end;
81
78 } else { 82 } else {
79 if (end < sysmem.bank[i].end) 83 /* remove entry */
80 sysmem.bank[i].start = end; 84 sysmem.nr_banks--;
81 else { 85 sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
82 /* remove entry */ 86 sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
83 sysmem.nr_banks--;
84 sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
85 sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
86 }
87 } 87 }
88 return -1; 88 return -1;
89} 89}
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index b048406d8756..d97ed1ba7b0a 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -29,6 +29,7 @@
29 */ 29 */
30 30
31ENTRY(clear_page) 31ENTRY(clear_page)
32
32 entry a1, 16 33 entry a1, 16
33 34
34 movi a3, 0 35 movi a3, 0
@@ -45,6 +46,8 @@ ENTRY(clear_page)
45 46
46 retw 47 retw
47 48
49ENDPROC(clear_page)
50
48/* 51/*
49 * copy_page and copy_user_page are the same for non-cache-aliased configs. 52 * copy_page and copy_user_page are the same for non-cache-aliased configs.
50 * 53 *
@@ -53,6 +56,7 @@ ENTRY(clear_page)
53 */ 56 */
54 57
55ENTRY(copy_page) 58ENTRY(copy_page)
59
56 entry a1, 16 60 entry a1, 16
57 61
58 __loopi a2, a4, PAGE_SIZE, 32 62 __loopi a2, a4, PAGE_SIZE, 32
@@ -84,6 +88,8 @@ ENTRY(copy_page)
84 88
85 retw 89 retw
86 90
91ENDPROC(copy_page)
92
87#ifdef CONFIG_MMU 93#ifdef CONFIG_MMU
88/* 94/*
89 * If we have to deal with cache aliasing, we use temporary memory mappings 95 * If we have to deal with cache aliasing, we use temporary memory mappings
@@ -109,6 +115,7 @@ ENTRY(__tlbtemp_mapping_start)
109 */ 115 */
110 116
111ENTRY(clear_user_page) 117ENTRY(clear_user_page)
118
112 entry a1, 32 119 entry a1, 32
113 120
114 /* Mark page dirty and determine alias. */ 121 /* Mark page dirty and determine alias. */
@@ -164,6 +171,8 @@ ENTRY(clear_user_page)
164 171
165 retw 172 retw
166 173
174ENDPROC(clear_user_page)
175
167/* 176/*
168 * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page) 177 * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page)
169 * a2 a3 a4 a5 178 * a2 a3 a4 a5
@@ -171,7 +180,7 @@ ENTRY(clear_user_page)
171 180
172ENTRY(copy_user_page) 181ENTRY(copy_user_page)
173 182
174 entry a1, 32 183 entry a1, 32
175 184
176 /* Mark page dirty and determine alias for destination. */ 185 /* Mark page dirty and determine alias for destination. */
177 186
@@ -262,6 +271,8 @@ ENTRY(copy_user_page)
262 271
263 retw 272 retw
264 273
274ENDPROC(copy_user_page)
275
265#endif 276#endif
266 277
267#if (DCACHE_WAY_SIZE > PAGE_SIZE) 278#if (DCACHE_WAY_SIZE > PAGE_SIZE)
@@ -272,6 +283,7 @@ ENTRY(copy_user_page)
272 */ 283 */
273 284
274ENTRY(__flush_invalidate_dcache_page_alias) 285ENTRY(__flush_invalidate_dcache_page_alias)
286
275 entry sp, 16 287 entry sp, 16
276 288
277 movi a7, 0 # required for exception handler 289 movi a7, 0 # required for exception handler
@@ -287,6 +299,7 @@ ENTRY(__flush_invalidate_dcache_page_alias)
287 299
288 retw 300 retw
289 301
302ENDPROC(__flush_invalidate_dcache_page_alias)
290#endif 303#endif
291 304
292ENTRY(__tlbtemp_mapping_itlb) 305ENTRY(__tlbtemp_mapping_itlb)
@@ -294,6 +307,7 @@ ENTRY(__tlbtemp_mapping_itlb)
294#if (ICACHE_WAY_SIZE > PAGE_SIZE) 307#if (ICACHE_WAY_SIZE > PAGE_SIZE)
295 308
296ENTRY(__invalidate_icache_page_alias) 309ENTRY(__invalidate_icache_page_alias)
310
297 entry sp, 16 311 entry sp, 16
298 312
299 addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE) 313 addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
@@ -307,11 +321,14 @@ ENTRY(__invalidate_icache_page_alias)
307 isync 321 isync
308 retw 322 retw
309 323
324ENDPROC(__invalidate_icache_page_alias)
325
310#endif 326#endif
311 327
312/* End of special treatment in tlb miss exception */ 328/* End of special treatment in tlb miss exception */
313 329
314ENTRY(__tlbtemp_mapping_end) 330ENTRY(__tlbtemp_mapping_end)
331
315#endif /* CONFIG_MMU 332#endif /* CONFIG_MMU
316 333
317/* 334/*
@@ -319,6 +336,7 @@ ENTRY(__tlbtemp_mapping_end)
319 */ 336 */
320 337
321ENTRY(__invalidate_icache_page) 338ENTRY(__invalidate_icache_page)
339
322 entry sp, 16 340 entry sp, 16
323 341
324 ___invalidate_icache_page a2 a3 342 ___invalidate_icache_page a2 a3
@@ -326,11 +344,14 @@ ENTRY(__invalidate_icache_page)
326 344
327 retw 345 retw
328 346
347ENDPROC(__invalidate_icache_page)
348
329/* 349/*
330 * void __invalidate_dcache_page(ulong start) 350 * void __invalidate_dcache_page(ulong start)
331 */ 351 */
332 352
333ENTRY(__invalidate_dcache_page) 353ENTRY(__invalidate_dcache_page)
354
334 entry sp, 16 355 entry sp, 16
335 356
336 ___invalidate_dcache_page a2 a3 357 ___invalidate_dcache_page a2 a3
@@ -338,11 +359,14 @@ ENTRY(__invalidate_dcache_page)
338 359
339 retw 360 retw
340 361
362ENDPROC(__invalidate_dcache_page)
363
341/* 364/*
342 * void __flush_invalidate_dcache_page(ulong start) 365 * void __flush_invalidate_dcache_page(ulong start)
343 */ 366 */
344 367
345ENTRY(__flush_invalidate_dcache_page) 368ENTRY(__flush_invalidate_dcache_page)
369
346 entry sp, 16 370 entry sp, 16
347 371
348 ___flush_invalidate_dcache_page a2 a3 372 ___flush_invalidate_dcache_page a2 a3
@@ -350,11 +374,14 @@ ENTRY(__flush_invalidate_dcache_page)
350 dsync 374 dsync
351 retw 375 retw
352 376
377ENDPROC(__flush_invalidate_dcache_page)
378
353/* 379/*
354 * void __flush_dcache_page(ulong start) 380 * void __flush_dcache_page(ulong start)
355 */ 381 */
356 382
357ENTRY(__flush_dcache_page) 383ENTRY(__flush_dcache_page)
384
358 entry sp, 16 385 entry sp, 16
359 386
360 ___flush_dcache_page a2 a3 387 ___flush_dcache_page a2 a3
@@ -362,11 +389,14 @@ ENTRY(__flush_dcache_page)
362 dsync 389 dsync
363 retw 390 retw
364 391
392ENDPROC(__flush_dcache_page)
393
365/* 394/*
366 * void __invalidate_icache_range(ulong start, ulong size) 395 * void __invalidate_icache_range(ulong start, ulong size)
367 */ 396 */
368 397
369ENTRY(__invalidate_icache_range) 398ENTRY(__invalidate_icache_range)
399
370 entry sp, 16 400 entry sp, 16
371 401
372 ___invalidate_icache_range a2 a3 a4 402 ___invalidate_icache_range a2 a3 a4
@@ -374,11 +404,14 @@ ENTRY(__invalidate_icache_range)
374 404
375 retw 405 retw
376 406
407ENDPROC(__invalidate_icache_range)
408
377/* 409/*
378 * void __flush_invalidate_dcache_range(ulong start, ulong size) 410 * void __flush_invalidate_dcache_range(ulong start, ulong size)
379 */ 411 */
380 412
381ENTRY(__flush_invalidate_dcache_range) 413ENTRY(__flush_invalidate_dcache_range)
414
382 entry sp, 16 415 entry sp, 16
383 416
384 ___flush_invalidate_dcache_range a2 a3 a4 417 ___flush_invalidate_dcache_range a2 a3 a4
@@ -386,11 +419,14 @@ ENTRY(__flush_invalidate_dcache_range)
386 419
387 retw 420 retw
388 421
422ENDPROC(__flush_invalidate_dcache_range)
423
389/* 424/*
390 * void _flush_dcache_range(ulong start, ulong size) 425 * void _flush_dcache_range(ulong start, ulong size)
391 */ 426 */
392 427
393ENTRY(__flush_dcache_range) 428ENTRY(__flush_dcache_range)
429
394 entry sp, 16 430 entry sp, 16
395 431
396 ___flush_dcache_range a2 a3 a4 432 ___flush_dcache_range a2 a3 a4
@@ -398,22 +434,28 @@ ENTRY(__flush_dcache_range)
398 434
399 retw 435 retw
400 436
437ENDPROC(__flush_dcache_range)
438
401/* 439/*
402 * void _invalidate_dcache_range(ulong start, ulong size) 440 * void _invalidate_dcache_range(ulong start, ulong size)
403 */ 441 */
404 442
405ENTRY(__invalidate_dcache_range) 443ENTRY(__invalidate_dcache_range)
444
406 entry sp, 16 445 entry sp, 16
407 446
408 ___invalidate_dcache_range a2 a3 a4 447 ___invalidate_dcache_range a2 a3 a4
409 448
410 retw 449 retw
411 450
451ENDPROC(__invalidate_dcache_range)
452
412/* 453/*
413 * void _invalidate_icache_all(void) 454 * void _invalidate_icache_all(void)
414 */ 455 */
415 456
416ENTRY(__invalidate_icache_all) 457ENTRY(__invalidate_icache_all)
458
417 entry sp, 16 459 entry sp, 16
418 460
419 ___invalidate_icache_all a2 a3 461 ___invalidate_icache_all a2 a3
@@ -421,11 +463,14 @@ ENTRY(__invalidate_icache_all)
421 463
422 retw 464 retw
423 465
466ENDPROC(__invalidate_icache_all)
467
424/* 468/*
425 * void _flush_invalidate_dcache_all(void) 469 * void _flush_invalidate_dcache_all(void)
426 */ 470 */
427 471
428ENTRY(__flush_invalidate_dcache_all) 472ENTRY(__flush_invalidate_dcache_all)
473
429 entry sp, 16 474 entry sp, 16
430 475
431 ___flush_invalidate_dcache_all a2 a3 476 ___flush_invalidate_dcache_all a2 a3
@@ -433,11 +478,14 @@ ENTRY(__flush_invalidate_dcache_all)
433 478
434 retw 479 retw
435 480
481ENDPROC(__flush_invalidate_dcache_all)
482
436/* 483/*
437 * void _invalidate_dcache_all(void) 484 * void _invalidate_dcache_all(void)
438 */ 485 */
439 486
440ENTRY(__invalidate_dcache_all) 487ENTRY(__invalidate_dcache_all)
488
441 entry sp, 16 489 entry sp, 16
442 490
443 ___invalidate_dcache_all a2 a3 491 ___invalidate_dcache_all a2 a3
@@ -445,3 +493,4 @@ ENTRY(__invalidate_dcache_all)
445 493
446 retw 494 retw
447 495
496ENDPROC(__invalidate_dcache_all)
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index ca81654f3ec2..0f77f9d3bb8b 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -37,7 +37,7 @@ void __init init_mmu(void)
37 37
38 /* Set rasid register to a known value. */ 38 /* Set rasid register to a known value. */
39 39
40 set_rasid_register(ASID_USER_FIRST); 40 set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
41 41
42 /* Set PTEVADDR special register to the start of the page 42 /* Set PTEVADDR special register to the start of the page
43 * table, which is in kernel mappable space (ie. not 43 * table, which is in kernel mappable space (ie. not
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index e2700b21395b..5411aa67c68e 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -63,7 +63,7 @@ void flush_tlb_all (void)
63void flush_tlb_mm(struct mm_struct *mm) 63void flush_tlb_mm(struct mm_struct *mm)
64{ 64{
65 if (mm == current->active_mm) { 65 if (mm == current->active_mm) {
66 int flags; 66 unsigned long flags;
67 local_save_flags(flags); 67 local_save_flags(flags);
68 __get_new_mmu_context(mm); 68 __get_new_mmu_context(mm);
69 __load_mmu_context(mm); 69 __load_mmu_context(mm);
@@ -82,7 +82,7 @@ void flush_tlb_mm(struct mm_struct *mm)
82#endif 82#endif
83 83
84void flush_tlb_range (struct vm_area_struct *vma, 84void flush_tlb_range (struct vm_area_struct *vma,
85 unsigned long start, unsigned long end) 85 unsigned long start, unsigned long end)
86{ 86{
87 struct mm_struct *mm = vma->vm_mm; 87 struct mm_struct *mm = vma->vm_mm;
88 unsigned long flags; 88 unsigned long flags;
@@ -100,7 +100,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
100 int oldpid = get_rasid_register(); 100 int oldpid = get_rasid_register();
101 set_rasid_register (ASID_INSERT(mm->context)); 101 set_rasid_register (ASID_INSERT(mm->context));
102 start &= PAGE_MASK; 102 start &= PAGE_MASK;
103 if (vma->vm_flags & VM_EXEC) 103 if (vma->vm_flags & VM_EXEC)
104 while(start < end) { 104 while(start < end) {
105 invalidate_itlb_mapping(start); 105 invalidate_itlb_mapping(start);
106 invalidate_dtlb_mapping(start); 106 invalidate_dtlb_mapping(start);
@@ -130,7 +130,7 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
130 130
131 local_save_flags(flags); 131 local_save_flags(flags);
132 132
133 oldpid = get_rasid_register(); 133 oldpid = get_rasid_register();
134 134
135 if (vma->vm_flags & VM_EXEC) 135 if (vma->vm_flags & VM_EXEC)
136 invalidate_itlb_mapping(page); 136 invalidate_itlb_mapping(page);
@@ -140,4 +140,3 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
140 140
141 local_irq_restore(flags); 141 local_irq_restore(flags);
142} 142}
143
diff --git a/arch/xtensa/platforms/iss/include/platform/serial.h b/arch/xtensa/platforms/iss/include/platform/serial.h
index e69de29bb2d1..16aec542d435 100644
--- a/arch/xtensa/platforms/iss/include/platform/serial.h
+++ b/arch/xtensa/platforms/iss/include/platform/serial.h
@@ -0,0 +1,15 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 Tensilica Inc.
7 */
8
9#ifndef __ASM_XTENSA_ISS_SERIAL_H
10#define __ASM_XTENSA_ISS_SERIAL_H
11
12/* Have no meaning on ISS, but needed for 8250_early.c */
13#define BASE_BAUD 0
14
15#endif /* __ASM_XTENSA_ISS_SERIAL_H */
diff --git a/arch/xtensa/platforms/iss/include/platform/simcall.h b/arch/xtensa/platforms/iss/include/platform/simcall.h
index bd78192e2fc9..b5a4edf02d76 100644
--- a/arch/xtensa/platforms/iss/include/platform/simcall.h
+++ b/arch/xtensa/platforms/iss/include/platform/simcall.h
@@ -74,13 +74,12 @@ static inline int __simc(int a, int b, int c, int d, int e, int f)
74 "mov %1, a3\n" 74 "mov %1, a3\n"
75 : "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1) 75 : "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1)
76 : "r"(c1), "r"(d1), "r"(e1), "r"(f1) 76 : "r"(c1), "r"(d1), "r"(e1), "r"(f1)
77 : ); 77 : "memory");
78 return ret; 78 return ret;
79} 79}
80 80
81static inline int simc_open(const char *file, int flags, int mode) 81static inline int simc_open(const char *file, int flags, int mode)
82{ 82{
83 wmb();
84 return __simc(SYS_open, (int) file, flags, mode, 0, 0); 83 return __simc(SYS_open, (int) file, flags, mode, 0, 0);
85} 84}
86 85
@@ -91,19 +90,16 @@ static inline int simc_close(int fd)
91 90
92static inline int simc_ioctl(int fd, int request, void *arg) 91static inline int simc_ioctl(int fd, int request, void *arg)
93{ 92{
94 wmb();
95 return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0); 93 return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0);
96} 94}
97 95
98static inline int simc_read(int fd, void *buf, size_t count) 96static inline int simc_read(int fd, void *buf, size_t count)
99{ 97{
100 rmb();
101 return __simc(SYS_read, fd, (int) buf, count, 0, 0); 98 return __simc(SYS_read, fd, (int) buf, count, 0, 0);
102} 99}
103 100
104static inline int simc_write(int fd, const void *buf, size_t count) 101static inline int simc_write(int fd, const void *buf, size_t count)
105{ 102{
106 wmb();
107 return __simc(SYS_write, fd, (int) buf, count, 0, 0); 103 return __simc(SYS_write, fd, (int) buf, count, 0, 0);
108} 104}
109 105
@@ -111,7 +107,6 @@ static inline int simc_poll(int fd)
111{ 107{
112 struct timeval tv = { .tv_sec = 0, .tv_usec = 0 }; 108 struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
113 109
114 wmb();
115 return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv, 110 return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv,
116 0, 0); 111 0, 0);
117} 112}
diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
new file mode 100644
index 000000000000..b9ae206340cd
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/Makefile
@@ -0,0 +1,9 @@
1# Makefile for the Tensilica xtavnet Emulation Board
2#
3# Note! Dependencies are done automagically by 'make dep', which also
4# removes any old dependencies. DON'T put your own dependencies here
5# unless it's something special (ie not a .c file).
6#
7# Note 2! The CFLAGS definitions are in the main makefile...
8
9obj-y = setup.o lcd.o
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
new file mode 100644
index 000000000000..4416773cbde5
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -0,0 +1,69 @@
1/*
2 * arch/xtensa/platform/xtavnet/include/platform/hardware.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2006 Tensilica Inc.
9 */
10
11/*
12 * This file contains the hardware configuration of the XTAVNET boards.
13 */
14
15#ifndef __XTENSA_XTAVNET_HARDWARE_H
16#define __XTENSA_XTAVNET_HARDWARE_H
17
18/* By default NO_IRQ is defined to 0 in Linux, but we use the
19 interrupt 0 for UART... */
20#define NO_IRQ -1
21
22/* Memory configuration. */
23
24#define PLATFORM_DEFAULT_MEM_START 0x00000000
25#define PLATFORM_DEFAULT_MEM_SIZE 0x04000000
26
27/* Interrupt configuration. */
28
29#define PLATFORM_NR_IRQS 10
30
31/* Default assignment of LX60 devices to external interrupts. */
32
33#ifdef CONFIG_ARCH_HAS_SMP
34#define DUART16552_INTNUM XCHAL_EXTINT3_NUM
35#define OETH_IRQ XCHAL_EXTINT4_NUM
36#else
37#define DUART16552_INTNUM XCHAL_EXTINT0_NUM
38#define OETH_IRQ XCHAL_EXTINT1_NUM
39#endif
40
41/*
42 * Device addresses and parameters.
43 */
44
45/* UART */
46#define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
47/* LCD instruction and data addresses. */
48#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
49#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))
50
51/* Misc. */
52#define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
53/* Clock frequency in Hz (read-only): */
54#define XTFPGA_CLKFRQ_VADDR (XTFPGA_FPGAREGS_VADDR + 0x04)
55/* Setting of 8 DIP switches: */
56#define DIP_SWITCHES_VADDR (XTFPGA_FPGAREGS_VADDR + 0x0C)
57/* Software reset (write 0xdead): */
58#define XTFPGA_SWRST_VADDR (XTFPGA_FPGAREGS_VADDR + 0x10)
59
60/* OpenCores Ethernet controller: */
61 /* regs + RX/TX descriptors */
62#define OETH_REGS_PADDR (XCHAL_KIO_PADDR + 0x0D030000)
63#define OETH_REGS_SIZE 0x1000
64#define OETH_SRAMBUFF_PADDR (XCHAL_KIO_PADDR + 0x0D800000)
65
66 /* 5*rx buffs + 5*tx buffs */
67#define OETH_SRAMBUFF_SIZE (5 * 0x600 + 5 * 0x600)
68
69#endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
new file mode 100644
index 000000000000..0e435645af5a
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
@@ -0,0 +1,20 @@
1/*
2 * arch/xtensa/platform/xtavnet/include/platform/lcd.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001, 2006 Tensilica Inc.
9 */
10
11#ifndef __XTENSA_XTAVNET_LCD_H
12#define __XTENSA_XTAVNET_LCD_H
13
14/* Display string STR at position POS on the LCD. */
15void lcd_disp_at_pos(char *str, unsigned char pos);
16
17/* Shift the contents of the LCD display left or right. */
18void lcd_shiftleft(void);
19void lcd_shiftright(void);
20#endif
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/serial.h b/arch/xtensa/platforms/xtfpga/include/platform/serial.h
new file mode 100644
index 000000000000..14d8f7beebfd
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/include/platform/serial.h
@@ -0,0 +1,18 @@
1/*
2 * arch/xtensa/platform/xtavnet/include/platform/serial.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001, 2006 Tensilica Inc.
9 */
10
11#ifndef __ASM_XTENSA_XTAVNET_SERIAL_H
12#define __ASM_XTENSA_XTAVNET_SERIAL_H
13
14#include <platform/hardware.h>
15
16#define BASE_BAUD (*(long *)XTFPGA_CLKFRQ_VADDR / 16)
17
18#endif /* __ASM_XTENSA_XTAVNET_SERIAL_H */
diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
new file mode 100644
index 000000000000..2872301598df
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/lcd.c
@@ -0,0 +1,76 @@
1/*
2 * Driver for the LCD display on the Tensilica LX60 Board.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001, 2006 Tensilica Inc.
9 */
10
11/*
12 *
13 * FIXME: this code is from the examples from the LX60 user guide.
14 *
15 * The lcd_pause function does busy waiting, which is probably not
16 * great. Maybe the code could be changed to use kernel timers, or
17 * change the hardware to not need to wait.
18 */
19
20#include <linux/init.h>
21#include <linux/io.h>
22
23#include <platform/hardware.h>
24#include <platform/lcd.h>
25#include <linux/delay.h>
26
27#define LCD_PAUSE_ITERATIONS 4000
28#define LCD_CLEAR 0x1
29#define LCD_DISPLAY_ON 0xc
30
31/* 8bit and 2 lines display */
32#define LCD_DISPLAY_MODE8BIT 0x38
33#define LCD_DISPLAY_POS 0x80
34#define LCD_SHIFT_LEFT 0x18
35#define LCD_SHIFT_RIGHT 0x1c
36
37static int __init lcd_init(void)
38{
39 *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
40 mdelay(5);
41 *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
42 udelay(200);
43 *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
44 udelay(50);
45 *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
46 udelay(50);
47 *LCD_INSTR_ADDR = LCD_CLEAR;
48 mdelay(10);
49 lcd_disp_at_pos("XTENSA LINUX", 0);
50 return 0;
51}
52
53void lcd_disp_at_pos(char *str, unsigned char pos)
54{
55 *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
56 udelay(100);
57 while (*str != 0) {
58 *LCD_DATA_ADDR = *str;
59 udelay(200);
60 str++;
61 }
62}
63
64void lcd_shiftleft(void)
65{
66 *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
67 udelay(50);
68}
69
70void lcd_shiftright(void)
71{
72 *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
73 udelay(50);
74}
75
76arch_initcall(lcd_init);
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
new file mode 100644
index 000000000000..4b9951a4569d
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -0,0 +1,301 @@
1/*
2 *
3 * arch/xtensa/platform/xtavnet/setup.c
4 *
5 * ...
6 *
7 * Authors: Chris Zankel <chris@zankel.net>
8 * Joe Taylor <joe@tensilica.com>
9 *
10 * Copyright 2001 - 2006 Tensilica Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 */
18#include <linux/stddef.h>
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/errno.h>
22#include <linux/reboot.h>
23#include <linux/kdev_t.h>
24#include <linux/types.h>
25#include <linux/major.h>
26#include <linux/console.h>
27#include <linux/delay.h>
28#include <linux/of.h>
29
30#include <asm/timex.h>
31#include <asm/processor.h>
32#include <asm/platform.h>
33#include <asm/bootparam.h>
34#include <platform/lcd.h>
35#include <platform/hardware.h>
36
37void platform_halt(void)
38{
39 lcd_disp_at_pos(" HALT ", 0);
40 local_irq_disable();
41 while (1)
42 cpu_relax();
43}
44
45void platform_power_off(void)
46{
47 lcd_disp_at_pos("POWEROFF", 0);
48 local_irq_disable();
49 while (1)
50 cpu_relax();
51}
52
53void platform_restart(void)
54{
55 /* Flush and reset the mmu, simulate a processor reset, and
56 * jump to the reset vector. */
57
58
59 __asm__ __volatile__ ("movi a2, 15\n\t"
60 "wsr a2, icountlevel\n\t"
61 "movi a2, 0\n\t"
62 "wsr a2, icount\n\t"
63 "wsr a2, ibreakenable\n\t"
64 "wsr a2, lcount\n\t"
65 "movi a2, 0x1f\n\t"
66 "wsr a2, ps\n\t"
67 "isync\n\t"
68 "jx %0\n\t"
69 :
70 : "a" (XCHAL_RESET_VECTOR_VADDR)
71 : "a2"
72 );
73
74 /* control never gets here */
75}
76
77void __init platform_setup(char **cmdline)
78{
79}
80
81#ifdef CONFIG_OF
82
83static void __init update_clock_frequency(struct device_node *node)
84{
85 struct property *newfreq;
86 u32 freq;
87
88 if (!of_property_read_u32(node, "clock-frequency", &freq) && freq != 0)
89 return;
90
91 newfreq = kzalloc(sizeof(*newfreq) + sizeof(u32), GFP_KERNEL);
92 if (!newfreq)
93 return;
94 newfreq->value = newfreq + 1;
95 newfreq->length = sizeof(freq);
96 newfreq->name = kstrdup("clock-frequency", GFP_KERNEL);
97 if (!newfreq->name) {
98 kfree(newfreq);
99 return;
100 }
101
102 *(u32 *)newfreq->value = cpu_to_be32(*(u32 *)XTFPGA_CLKFRQ_VADDR);
103 prom_update_property(node, newfreq);
104}
105
106#define MAC_LEN 6
107static void __init update_local_mac(struct device_node *node)
108{
109 struct property *newmac;
110 const u8* macaddr;
111 int prop_len;
112
113 macaddr = of_get_property(node, "local-mac-address", &prop_len);
114 if (macaddr == NULL || prop_len != MAC_LEN)
115 return;
116
117 newmac = kzalloc(sizeof(*newmac) + MAC_LEN, GFP_KERNEL);
118 if (newmac == NULL)
119 return;
120
121 newmac->value = newmac + 1;
122 newmac->length = MAC_LEN;
123 newmac->name = kstrdup("local-mac-address", GFP_KERNEL);
124 if (newmac->name == NULL) {
125 kfree(newmac);
126 return;
127 }
128
129 memcpy(newmac->value, macaddr, MAC_LEN);
130 ((u8*)newmac->value)[5] = (*(u32*)DIP_SWITCHES_VADDR) & 0x3f;
131 prom_update_property(node, newmac);
132}
133
134static int __init machine_setup(void)
135{
136 struct device_node *serial;
137 struct device_node *eth = NULL;
138
139 for_each_compatible_node(serial, NULL, "ns16550a")
140 update_clock_frequency(serial);
141
142 if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
143 update_local_mac(eth);
144 return 0;
145}
146arch_initcall(machine_setup);
147
148#endif
149
150/* early initialization */
151
152void __init platform_init(bp_tag_t *first)
153{
154}
155
156/* Heartbeat. */
157
158void platform_heartbeat(void)
159{
160}
161
162#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
163
164void platform_calibrate_ccount(void)
165{
166 long clk_freq = 0;
167#ifdef CONFIG_OF
168 struct device_node *cpu =
169 of_find_compatible_node(NULL, NULL, "xtensa,cpu");
170 if (cpu) {
171 u32 freq;
172 update_clock_frequency(cpu);
173 if (!of_property_read_u32(cpu, "clock-frequency", &freq))
174 clk_freq = freq;
175 }
176#endif
177 if (!clk_freq)
178 clk_freq = *(long *)XTFPGA_CLKFRQ_VADDR;
179
180 ccount_per_jiffy = clk_freq / HZ;
181 nsec_per_ccount = 1000000000UL / clk_freq;
182}
183
184#endif
185
186#ifndef CONFIG_OF
187
188#include <linux/serial_8250.h>
189#include <linux/if.h>
190#include <net/ethoc.h>
191
192/*----------------------------------------------------------------------------
193 * Ethernet -- OpenCores Ethernet MAC (ethoc driver)
194 */
195
196static struct resource ethoc_res[] __initdata = {
197 [0] = { /* register space */
198 .start = OETH_REGS_PADDR,
199 .end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1,
200 .flags = IORESOURCE_MEM,
201 },
202 [1] = { /* buffer space */
203 .start = OETH_SRAMBUFF_PADDR,
204 .end = OETH_SRAMBUFF_PADDR + OETH_SRAMBUFF_SIZE - 1,
205 .flags = IORESOURCE_MEM,
206 },
207 [2] = { /* IRQ number */
208 .start = OETH_IRQ,
209 .end = OETH_IRQ,
210 .flags = IORESOURCE_IRQ,
211 },
212};
213
214static struct ethoc_platform_data ethoc_pdata __initdata = {
215 /*
216 * The MAC address for these boards is 00:50:c2:13:6f:xx.
217 * The last byte (here as zero) is read from the DIP switches on the
218 * board.
219 */
220 .hwaddr = { 0x00, 0x50, 0xc2, 0x13, 0x6f, 0 },
221 .phy_id = -1,
222};
223
224static struct platform_device ethoc_device __initdata = {
225 .name = "ethoc",
226 .id = -1,
227 .num_resources = ARRAY_SIZE(ethoc_res),
228 .resource = ethoc_res,
229 .dev = {
230 .platform_data = &ethoc_pdata,
231 },
232};
233
234/*----------------------------------------------------------------------------
235 * UART
236 */
237
238static struct resource serial_resource __initdata = {
239 .start = DUART16552_PADDR,
240 .end = DUART16552_PADDR + 0x1f,
241 .flags = IORESOURCE_MEM,
242};
243
244static struct plat_serial8250_port serial_platform_data[] __initdata = {
245 [0] = {
246 .mapbase = DUART16552_PADDR,
247 .irq = DUART16552_INTNUM,
248 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
249 UPF_IOREMAP,
250 .iotype = UPIO_MEM32,
251 .regshift = 2,
252 .uartclk = 0, /* set in xtavnet_init() */
253 },
254 { },
255};
256
257static struct platform_device xtavnet_uart __initdata = {
258 .name = "serial8250",
259 .id = PLAT8250_DEV_PLATFORM,
260 .dev = {
261 .platform_data = serial_platform_data,
262 },
263 .num_resources = 1,
264 .resource = &serial_resource,
265};
266
267/* platform devices */
268static struct platform_device *platform_devices[] __initdata = {
269 &ethoc_device,
270 &xtavnet_uart,
271};
272
273
274static int __init xtavnet_init(void)
275{
276 /* Ethernet MAC address. */
277 ethoc_pdata.hwaddr[5] = *(u32 *)DIP_SWITCHES_VADDR;
278
279 /* Clock rate varies among FPGA bitstreams; board specific FPGA register
280 * reports the actual clock rate.
281 */
282 serial_platform_data[0].uartclk = *(long *)XTFPGA_CLKFRQ_VADDR;
283
284
285 /* register platform devices */
286 platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
287
288 /* ETHOC driver is a bit quiet; at least display Ethernet MAC, so user
289 * knows whether they set it correctly on the DIP switches.
290 */
291 pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr);
292
293 return 0;
294}
295
296/*
297 * Register to be done during do_initcalls().
298 */
299arch_initcall(xtavnet_init);
300
301#endif /* CONFIG_OF */
diff --git a/arch/xtensa/variants/s6000/gpio.c b/arch/xtensa/variants/s6000/gpio.c
index b89541ba39ab..da9e85c13b08 100644
--- a/arch/xtensa/variants/s6000/gpio.c
+++ b/arch/xtensa/variants/s6000/gpio.c
@@ -164,7 +164,7 @@ static void demux_irqs(unsigned int irq, struct irq_desc *desc)
164 int cirq; 164 int cirq;
165 165
166 chip->irq_mask(&desc->irq_data); 166 chip->irq_mask(&desc->irq_data);
167 chip->irq_ack(&desc->irq_data)); 167 chip->irq_ack(&desc->irq_data);
168 pending = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_MIS) & *mask; 168 pending = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_MIS) & *mask;
169 cirq = IRQ_BASE - 1; 169 cirq = IRQ_BASE - 1;
170 while (pending) { 170 while (pending) {
@@ -173,7 +173,7 @@ static void demux_irqs(unsigned int irq, struct irq_desc *desc)
173 pending >>= n; 173 pending >>= n;
174 generic_handle_irq(cirq); 174 generic_handle_irq(cirq);
175 } 175 }
176 chip->irq_unmask(&desc->irq_data)); 176 chip->irq_unmask(&desc->irq_data);
177} 177}
178 178
179extern const signed char *platform_irq_mappings[XTENSA_NR_IRQS]; 179extern const signed char *platform_irq_mappings[XTENSA_NR_IRQS];
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index c909b7b7d5f1..d70abe77f737 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -42,7 +42,8 @@
42#include <linux/swab.h> 42#include <linux/swab.h>
43#include <linux/slab.h> 43#include <linux/slab.h>
44 44
45#define VERSION "0.07" 45#define VERSION "1.04"
46#define DRIVER_VERSION 0x01
46#define PTAG "solos-pci" 47#define PTAG "solos-pci"
47 48
48#define CONFIG_RAM_SIZE 128 49#define CONFIG_RAM_SIZE 128
@@ -56,16 +57,21 @@
56#define FLASH_BUSY 0x60 57#define FLASH_BUSY 0x60
57#define FPGA_MODE 0x5C 58#define FPGA_MODE 0x5C
58#define FLASH_MODE 0x58 59#define FLASH_MODE 0x58
60#define GPIO_STATUS 0x54
61#define DRIVER_VER 0x50
59#define TX_DMA_ADDR(port) (0x40 + (4 * (port))) 62#define TX_DMA_ADDR(port) (0x40 + (4 * (port)))
60#define RX_DMA_ADDR(port) (0x30 + (4 * (port))) 63#define RX_DMA_ADDR(port) (0x30 + (4 * (port)))
61 64
62#define DATA_RAM_SIZE 32768 65#define DATA_RAM_SIZE 32768
63#define BUF_SIZE 2048 66#define BUF_SIZE 2048
64#define OLD_BUF_SIZE 4096 /* For FPGA versions <= 2*/ 67#define OLD_BUF_SIZE 4096 /* For FPGA versions <= 2*/
65#define FPGA_PAGE 528 /* FPGA flash page size*/ 68/* Old boards use ATMEL AD45DB161D flash */
66#define SOLOS_PAGE 512 /* Solos flash page size*/ 69#define ATMEL_FPGA_PAGE 528 /* FPGA flash page size*/
67#define FPGA_BLOCK (FPGA_PAGE * 8) /* FPGA flash block size*/ 70#define ATMEL_SOLOS_PAGE 512 /* Solos flash page size*/
68#define SOLOS_BLOCK (SOLOS_PAGE * 8) /* Solos flash block size*/ 71#define ATMEL_FPGA_BLOCK (ATMEL_FPGA_PAGE * 8) /* FPGA block size*/
72#define ATMEL_SOLOS_BLOCK (ATMEL_SOLOS_PAGE * 8) /* Solos block size*/
73/* Current boards use M25P/M25PE SPI flash */
74#define SPI_FLASH_BLOCK (256 * 64)
69 75
70#define RX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2) 76#define RX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2)
71#define TX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2 + (card->buffer_size)) 77#define TX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2 + (card->buffer_size))
@@ -122,11 +128,14 @@ struct solos_card {
122 struct sk_buff_head cli_queue[4]; 128 struct sk_buff_head cli_queue[4];
123 struct sk_buff *tx_skb[4]; 129 struct sk_buff *tx_skb[4];
124 struct sk_buff *rx_skb[4]; 130 struct sk_buff *rx_skb[4];
131 unsigned char *dma_bounce;
125 wait_queue_head_t param_wq; 132 wait_queue_head_t param_wq;
126 wait_queue_head_t fw_wq; 133 wait_queue_head_t fw_wq;
127 int using_dma; 134 int using_dma;
135 int dma_alignment;
128 int fpga_version; 136 int fpga_version;
129 int buffer_size; 137 int buffer_size;
138 int atmel_flash;
130}; 139};
131 140
132 141
@@ -451,7 +460,6 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
451 460
452 len = skb->len; 461 len = skb->len;
453 memcpy(buf, skb->data, len); 462 memcpy(buf, skb->data, len);
454 dev_dbg(&card->dev->dev, "len: %d\n", len);
455 463
456 kfree_skb(skb); 464 kfree_skb(skb);
457 return len; 465 return len;
@@ -498,6 +506,78 @@ static ssize_t console_store(struct device *dev, struct device_attribute *attr,
498 return err?:count; 506 return err?:count;
499} 507}
500 508
509struct geos_gpio_attr {
510 struct device_attribute attr;
511 int offset;
512};
513
514#define SOLOS_GPIO_ATTR(_name, _mode, _show, _store, _offset) \
515 struct geos_gpio_attr gpio_attr_##_name = { \
516 .attr = __ATTR(_name, _mode, _show, _store), \
517 .offset = _offset }
518
519static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr,
520 const char *buf, size_t count)
521{
522 struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
523 struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
524 struct solos_card *card = pci_get_drvdata(pdev);
525 uint32_t data32;
526
527 if (count != 1 && (count != 2 || buf[1] != '\n'))
528 return -EINVAL;
529
530 spin_lock_irq(&card->param_queue_lock);
531 data32 = ioread32(card->config_regs + GPIO_STATUS);
532 if (buf[0] == '1') {
533 data32 |= 1 << gattr->offset;
534 iowrite32(data32, card->config_regs + GPIO_STATUS);
535 } else if (buf[0] == '0') {
536 data32 &= ~(1 << gattr->offset);
537 iowrite32(data32, card->config_regs + GPIO_STATUS);
538 } else {
539 count = -EINVAL;
540 }
 541 spin_unlock_irq(&card->param_queue_lock);
542 return count;
543}
544
545static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr,
546 char *buf)
547{
548 struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
549 struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
550 struct solos_card *card = pci_get_drvdata(pdev);
551 uint32_t data32;
552
553 data32 = ioread32(card->config_regs + GPIO_STATUS);
554 data32 = (data32 >> gattr->offset) & 1;
555
556 return sprintf(buf, "%d\n", data32);
557}
558
559static ssize_t hardware_show(struct device *dev, struct device_attribute *attr,
560 char *buf)
561{
562 struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
563 struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
564 struct solos_card *card = pci_get_drvdata(pdev);
565 uint32_t data32;
566
567 data32 = ioread32(card->config_regs + GPIO_STATUS);
568 switch (gattr->offset) {
569 case 0:
570 /* HardwareVersion */
571 data32 = data32 & 0x1F;
572 break;
573 case 1:
574 /* HardwareVariant */
575 data32 = (data32 >> 5) & 0x0F;
576 break;
577 }
578 return sprintf(buf, "%d\n", data32);
579}
580
501static DEVICE_ATTR(console, 0644, console_show, console_store); 581static DEVICE_ATTR(console, 0644, console_show, console_store);
502 582
503 583
@@ -506,6 +586,14 @@ static DEVICE_ATTR(console, 0644, console_show, console_store);
506 586
507#include "solos-attrlist.c" 587#include "solos-attrlist.c"
508 588
589static SOLOS_GPIO_ATTR(GPIO1, 0644, geos_gpio_show, geos_gpio_store, 9);
590static SOLOS_GPIO_ATTR(GPIO2, 0644, geos_gpio_show, geos_gpio_store, 10);
591static SOLOS_GPIO_ATTR(GPIO3, 0644, geos_gpio_show, geos_gpio_store, 11);
592static SOLOS_GPIO_ATTR(GPIO4, 0644, geos_gpio_show, geos_gpio_store, 12);
593static SOLOS_GPIO_ATTR(GPIO5, 0644, geos_gpio_show, geos_gpio_store, 13);
594static SOLOS_GPIO_ATTR(PushButton, 0444, geos_gpio_show, NULL, 14);
595static SOLOS_GPIO_ATTR(HardwareVersion, 0444, hardware_show, NULL, 0);
596static SOLOS_GPIO_ATTR(HardwareVariant, 0444, hardware_show, NULL, 1);
509#undef SOLOS_ATTR_RO 597#undef SOLOS_ATTR_RO
510#undef SOLOS_ATTR_RW 598#undef SOLOS_ATTR_RW
511 599
@@ -522,6 +610,23 @@ static struct attribute_group solos_attr_group = {
522 .name = "parameters", 610 .name = "parameters",
523}; 611};
524 612
613static struct attribute *gpio_attrs[] = {
614 &gpio_attr_GPIO1.attr.attr,
615 &gpio_attr_GPIO2.attr.attr,
616 &gpio_attr_GPIO3.attr.attr,
617 &gpio_attr_GPIO4.attr.attr,
618 &gpio_attr_GPIO5.attr.attr,
619 &gpio_attr_PushButton.attr.attr,
620 &gpio_attr_HardwareVersion.attr.attr,
621 &gpio_attr_HardwareVariant.attr.attr,
622 NULL
623};
624
625static struct attribute_group gpio_attr_group = {
626 .attrs = gpio_attrs,
627 .name = "gpio",
628};
629
525static int flash_upgrade(struct solos_card *card, int chip) 630static int flash_upgrade(struct solos_card *card, int chip)
526{ 631{
527 const struct firmware *fw; 632 const struct firmware *fw;
@@ -533,16 +638,25 @@ static int flash_upgrade(struct solos_card *card, int chip)
533 switch (chip) { 638 switch (chip) {
534 case 0: 639 case 0:
535 fw_name = "solos-FPGA.bin"; 640 fw_name = "solos-FPGA.bin";
536 blocksize = FPGA_BLOCK; 641 if (card->atmel_flash)
642 blocksize = ATMEL_FPGA_BLOCK;
643 else
644 blocksize = SPI_FLASH_BLOCK;
537 break; 645 break;
538 case 1: 646 case 1:
539 fw_name = "solos-Firmware.bin"; 647 fw_name = "solos-Firmware.bin";
540 blocksize = SOLOS_BLOCK; 648 if (card->atmel_flash)
649 blocksize = ATMEL_SOLOS_BLOCK;
650 else
651 blocksize = SPI_FLASH_BLOCK;
541 break; 652 break;
542 case 2: 653 case 2:
543 if (card->fpga_version > LEGACY_BUFFERS){ 654 if (card->fpga_version > LEGACY_BUFFERS){
544 fw_name = "solos-db-FPGA.bin"; 655 fw_name = "solos-db-FPGA.bin";
545 blocksize = FPGA_BLOCK; 656 if (card->atmel_flash)
657 blocksize = ATMEL_FPGA_BLOCK;
658 else
659 blocksize = SPI_FLASH_BLOCK;
546 } else { 660 } else {
547 dev_info(&card->dev->dev, "FPGA version doesn't support" 661 dev_info(&card->dev->dev, "FPGA version doesn't support"
548 " daughter board upgrades\n"); 662 " daughter board upgrades\n");
@@ -552,7 +666,10 @@ static int flash_upgrade(struct solos_card *card, int chip)
552 case 3: 666 case 3:
553 if (card->fpga_version > LEGACY_BUFFERS){ 667 if (card->fpga_version > LEGACY_BUFFERS){
554 fw_name = "solos-Firmware.bin"; 668 fw_name = "solos-Firmware.bin";
555 blocksize = SOLOS_BLOCK; 669 if (card->atmel_flash)
670 blocksize = ATMEL_SOLOS_BLOCK;
671 else
672 blocksize = SPI_FLASH_BLOCK;
556 } else { 673 } else {
557 dev_info(&card->dev->dev, "FPGA version doesn't support" 674 dev_info(&card->dev->dev, "FPGA version doesn't support"
558 " daughter board upgrades\n"); 675 " daughter board upgrades\n");
@@ -568,6 +685,9 @@ static int flash_upgrade(struct solos_card *card, int chip)
568 685
569 dev_info(&card->dev->dev, "Flash upgrade starting\n"); 686 dev_info(&card->dev->dev, "Flash upgrade starting\n");
570 687
688 /* New FPGAs require driver version before permitting flash upgrades */
689 iowrite32(DRIVER_VERSION, card->config_regs + DRIVER_VER);
690
571 numblocks = fw->size / blocksize; 691 numblocks = fw->size / blocksize;
572 dev_info(&card->dev->dev, "Firmware size: %zd\n", fw->size); 692 dev_info(&card->dev->dev, "Firmware size: %zd\n", fw->size);
573 dev_info(&card->dev->dev, "Number of blocks: %d\n", numblocks); 693 dev_info(&card->dev->dev, "Number of blocks: %d\n", numblocks);
@@ -597,9 +717,13 @@ static int flash_upgrade(struct solos_card *card, int chip)
597 /* dev_info(&card->dev->dev, "Set FPGA Flash mode to Block Write\n"); */ 717 /* dev_info(&card->dev->dev, "Set FPGA Flash mode to Block Write\n"); */
598 iowrite32(((chip * 2) + 1), card->config_regs + FLASH_MODE); 718 iowrite32(((chip * 2) + 1), card->config_regs + FLASH_MODE);
599 719
600 /* Copy block to buffer, swapping each 16 bits */ 720 /* Copy block to buffer, swapping each 16 bits for Atmel flash */
601 for(i = 0; i < blocksize; i += 4) { 721 for(i = 0; i < blocksize; i += 4) {
602 uint32_t word = swahb32p((uint32_t *)(fw->data + offset + i)); 722 uint32_t word;
723 if (card->atmel_flash)
724 word = swahb32p((uint32_t *)(fw->data + offset + i));
725 else
726 word = *(uint32_t *)(fw->data + offset + i);
603 if(card->fpga_version > LEGACY_BUFFERS) 727 if(card->fpga_version > LEGACY_BUFFERS)
604 iowrite32(word, FLASH_BUF + i); 728 iowrite32(word, FLASH_BUF + i);
605 else 729 else
@@ -961,7 +1085,12 @@ static uint32_t fpga_tx(struct solos_card *card)
961 tx_started |= 1 << port; 1085 tx_started |= 1 << port;
962 oldskb = skb; /* We're done with this skb already */ 1086 oldskb = skb; /* We're done with this skb already */
963 } else if (skb && card->using_dma) { 1087 } else if (skb && card->using_dma) {
964 SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data, 1088 unsigned char *data = skb->data;
1089 if ((unsigned long)data & card->dma_alignment) {
1090 data = card->dma_bounce + (BUF_SIZE * port);
1091 memcpy(data, skb->data, skb->len);
1092 }
1093 SKB_CB(skb)->dma_addr = pci_map_single(card->dev, data,
965 skb->len, PCI_DMA_TODEVICE); 1094 skb->len, PCI_DMA_TODEVICE);
966 card->tx_skb[port] = skb; 1095 card->tx_skb[port] = skb;
967 iowrite32(SKB_CB(skb)->dma_addr, 1096 iowrite32(SKB_CB(skb)->dma_addr,
@@ -1133,18 +1262,33 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
1133 db_fpga_upgrade = db_firmware_upgrade = 0; 1262 db_fpga_upgrade = db_firmware_upgrade = 0;
1134 } 1263 }
1135 1264
1265 /* Stopped using Atmel flash after 0.03-38 */
1266 if (fpga_ver < 39)
1267 card->atmel_flash = 1;
1268 else
1269 card->atmel_flash = 0;
1270
1271 data32 = ioread32(card->config_regs + PORTS);
1272 card->nr_ports = (data32 & 0x000000FF);
1273
1136 if (card->fpga_version >= DMA_SUPPORTED) { 1274 if (card->fpga_version >= DMA_SUPPORTED) {
1137 pci_set_master(dev); 1275 pci_set_master(dev);
1138 card->using_dma = 1; 1276 card->using_dma = 1;
1277 if (1) { /* All known FPGA versions so far */
1278 card->dma_alignment = 3;
1279 card->dma_bounce = kmalloc(card->nr_ports * BUF_SIZE, GFP_KERNEL);
1280 if (!card->dma_bounce) {
1281 dev_warn(&card->dev->dev, "Failed to allocate DMA bounce buffers\n");
1282 /* Fallback to MMIO doesn't work */
1283 goto out_unmap_both;
1284 }
1285 }
1139 } else { 1286 } else {
1140 card->using_dma = 0; 1287 card->using_dma = 0;
1141 /* Set RX empty flag for all ports */ 1288 /* Set RX empty flag for all ports */
1142 iowrite32(0xF0, card->config_regs + FLAGS_ADDR); 1289 iowrite32(0xF0, card->config_regs + FLAGS_ADDR);
1143 } 1290 }
1144 1291
1145 data32 = ioread32(card->config_regs + PORTS);
1146 card->nr_ports = (data32 & 0x000000FF);
1147
1148 pci_set_drvdata(dev, card); 1292 pci_set_drvdata(dev, card);
1149 1293
1150 tasklet_init(&card->tlet, solos_bh, (unsigned long)card); 1294 tasklet_init(&card->tlet, solos_bh, (unsigned long)card);
@@ -1179,6 +1323,10 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
1179 if (err) 1323 if (err)
1180 goto out_free_irq; 1324 goto out_free_irq;
1181 1325
1326 if (card->fpga_version >= DMA_SUPPORTED &&
1327 sysfs_create_group(&card->dev->dev.kobj, &gpio_attr_group))
1328 dev_err(&card->dev->dev, "Could not register parameter group for GPIOs\n");
1329
1182 return 0; 1330 return 0;
1183 1331
1184 out_free_irq: 1332 out_free_irq:
@@ -1187,6 +1335,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
1187 tasklet_kill(&card->tlet); 1335 tasklet_kill(&card->tlet);
1188 1336
1189 out_unmap_both: 1337 out_unmap_both:
1338 kfree(card->dma_bounce);
1190 pci_set_drvdata(dev, NULL); 1339 pci_set_drvdata(dev, NULL);
1191 pci_iounmap(dev, card->buffers); 1340 pci_iounmap(dev, card->buffers);
1192 out_unmap_config: 1341 out_unmap_config:
@@ -1289,11 +1438,16 @@ static void fpga_remove(struct pci_dev *dev)
1289 iowrite32(1, card->config_regs + FPGA_MODE); 1438 iowrite32(1, card->config_regs + FPGA_MODE);
1290 (void)ioread32(card->config_regs + FPGA_MODE); 1439 (void)ioread32(card->config_regs + FPGA_MODE);
1291 1440
1441 if (card->fpga_version >= DMA_SUPPORTED)
1442 sysfs_remove_group(&card->dev->dev.kobj, &gpio_attr_group);
1443
1292 atm_remove(card); 1444 atm_remove(card);
1293 1445
1294 free_irq(dev->irq, card); 1446 free_irq(dev->irq, card);
1295 tasklet_kill(&card->tlet); 1447 tasklet_kill(&card->tlet);
1296 1448
1449 kfree(card->dma_bounce);
1450
1297 /* Release device from reset */ 1451 /* Release device from reset */
1298 iowrite32(0, card->config_regs + FPGA_MODE); 1452 iowrite32(0, card->config_regs + FPGA_MODE);
1299 (void)ioread32(card->config_regs + FPGA_MODE); 1453 (void)ioread32(card->config_regs + FPGA_MODE);
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index e162999bf916..c62c788b3289 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -13,12 +13,13 @@
13#include <linux/export.h> 13#include <linux/export.h>
14#include <linux/bcma/bcma.h> 14#include <linux/bcma/bcma.h>
15 15
16static u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset) 16u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
17{ 17{
18 bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset); 18 bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
19 bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR); 19 bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
20 return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA); 20 return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
21} 21}
22EXPORT_SYMBOL_GPL(bcma_chipco_pll_read);
22 23
23void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value) 24void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
24{ 25{
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b86eae9b77df..85e81ec1451e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -399,7 +399,6 @@ static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
399static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); 399static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
400static struct fasync_struct *fasync; 400static struct fasync_struct *fasync;
401 401
402#if 0
403static bool debug; 402static bool debug;
404module_param(debug, bool, 0644); 403module_param(debug, bool, 0644);
405#define DEBUG_ENT(fmt, arg...) do { \ 404#define DEBUG_ENT(fmt, arg...) do { \
@@ -410,9 +409,6 @@ module_param(debug, bool, 0644);
410 blocking_pool.entropy_count,\ 409 blocking_pool.entropy_count,\
411 nonblocking_pool.entropy_count,\ 410 nonblocking_pool.entropy_count,\
412 ## arg); } while (0) 411 ## arg); } while (0)
413#else
414#define DEBUG_ENT(fmt, arg...) do {} while (0)
415#endif
416 412
417/********************************************************************** 413/**********************************************************************
418 * 414 *
@@ -437,6 +433,7 @@ struct entropy_store {
437 int entropy_count; 433 int entropy_count;
438 int entropy_total; 434 int entropy_total;
439 unsigned int initialized:1; 435 unsigned int initialized:1;
436 bool last_data_init;
440 __u8 last_data[EXTRACT_SIZE]; 437 __u8 last_data[EXTRACT_SIZE];
441}; 438};
442 439
@@ -829,7 +826,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
829 bytes = min_t(int, bytes, sizeof(tmp)); 826 bytes = min_t(int, bytes, sizeof(tmp));
830 827
831 DEBUG_ENT("going to reseed %s with %d bits " 828 DEBUG_ENT("going to reseed %s with %d bits "
832 "(%d of %d requested)\n", 829 "(%zu of %d requested)\n",
833 r->name, bytes * 8, nbytes * 8, r->entropy_count); 830 r->name, bytes * 8, nbytes * 8, r->entropy_count);
834 831
835 bytes = extract_entropy(r->pull, tmp, bytes, 832 bytes = extract_entropy(r->pull, tmp, bytes,
@@ -860,7 +857,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
860 spin_lock_irqsave(&r->lock, flags); 857 spin_lock_irqsave(&r->lock, flags);
861 858
862 BUG_ON(r->entropy_count > r->poolinfo->POOLBITS); 859 BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
863 DEBUG_ENT("trying to extract %d bits from %s\n", 860 DEBUG_ENT("trying to extract %zu bits from %s\n",
864 nbytes * 8, r->name); 861 nbytes * 8, r->name);
865 862
866 /* Can we pull enough? */ 863 /* Can we pull enough? */
@@ -882,7 +879,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
882 } 879 }
883 } 880 }
884 881
885 DEBUG_ENT("debiting %d entropy credits from %s%s\n", 882 DEBUG_ENT("debiting %zu entropy credits from %s%s\n",
886 nbytes * 8, r->name, r->limit ? "" : " (unlimited)"); 883 nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
887 884
888 spin_unlock_irqrestore(&r->lock, flags); 885 spin_unlock_irqrestore(&r->lock, flags);
@@ -957,6 +954,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
957 ssize_t ret = 0, i; 954 ssize_t ret = 0, i;
958 __u8 tmp[EXTRACT_SIZE]; 955 __u8 tmp[EXTRACT_SIZE];
959 956
957 /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
958 if (fips_enabled && !r->last_data_init)
959 nbytes += EXTRACT_SIZE;
960
960 trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_); 961 trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
961 xfer_secondary_pool(r, nbytes); 962 xfer_secondary_pool(r, nbytes);
962 nbytes = account(r, nbytes, min, reserved); 963 nbytes = account(r, nbytes, min, reserved);
@@ -967,6 +968,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
967 if (fips_enabled) { 968 if (fips_enabled) {
968 unsigned long flags; 969 unsigned long flags;
969 970
971
972 /* prime last_data value if need be, per fips 140-2 */
973 if (!r->last_data_init) {
974 spin_lock_irqsave(&r->lock, flags);
975 memcpy(r->last_data, tmp, EXTRACT_SIZE);
976 r->last_data_init = true;
977 nbytes -= EXTRACT_SIZE;
978 spin_unlock_irqrestore(&r->lock, flags);
979 extract_buf(r, tmp);
980 }
981
970 spin_lock_irqsave(&r->lock, flags); 982 spin_lock_irqsave(&r->lock, flags);
971 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) 983 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
972 panic("Hardware RNG duplicated output!\n"); 984 panic("Hardware RNG duplicated output!\n");
@@ -1086,6 +1098,7 @@ static void init_std_data(struct entropy_store *r)
1086 1098
1087 r->entropy_count = 0; 1099 r->entropy_count = 0;
1088 r->entropy_total = 0; 1100 r->entropy_total = 0;
1101 r->last_data_init = false;
1089 mix_pool_bytes(r, &now, sizeof(now), NULL); 1102 mix_pool_bytes(r, &now, sizeof(now), NULL);
1090 for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) { 1103 for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
1091 if (!arch_get_random_long(&rv)) 1104 if (!arch_get_random_long(&rv))
@@ -1142,11 +1155,16 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1142 if (n > SEC_XFER_SIZE) 1155 if (n > SEC_XFER_SIZE)
1143 n = SEC_XFER_SIZE; 1156 n = SEC_XFER_SIZE;
1144 1157
1145 DEBUG_ENT("reading %d bits\n", n*8); 1158 DEBUG_ENT("reading %zu bits\n", n*8);
1146 1159
1147 n = extract_entropy_user(&blocking_pool, buf, n); 1160 n = extract_entropy_user(&blocking_pool, buf, n);
1148 1161
1149 DEBUG_ENT("read got %d bits (%d still needed)\n", 1162 if (n < 0) {
1163 retval = n;
1164 break;
1165 }
1166
1167 DEBUG_ENT("read got %zd bits (%zd still needed)\n",
1150 n*8, (nbytes-n)*8); 1168 n*8, (nbytes-n)*8);
1151 1169
1152 if (n == 0) { 1170 if (n == 0) {
@@ -1171,10 +1189,6 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1171 continue; 1189 continue;
1172 } 1190 }
1173 1191
1174 if (n < 0) {
1175 retval = n;
1176 break;
1177 }
1178 count += n; 1192 count += n;
1179 buf += n; 1193 buf += n;
1180 nbytes -= n; 1194 nbytes -= n;
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 517a8ff7121e..6b4c70f7d23d 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -20,6 +20,7 @@ void __init nomadik_clk_init(void)
20 clk_register_clkdev(clk, NULL, "gpio.2"); 20 clk_register_clkdev(clk, NULL, "gpio.2");
21 clk_register_clkdev(clk, NULL, "gpio.3"); 21 clk_register_clkdev(clk, NULL, "gpio.3");
22 clk_register_clkdev(clk, NULL, "rng"); 22 clk_register_clkdev(clk, NULL, "rng");
23 clk_register_clkdev(clk, NULL, "fsmc-nand");
23 24
24 /* 25 /*
25 * The 2.4 MHz TIMCLK reference clock is active at boot time, this is 26 * The 2.4 MHz TIMCLK reference clock is active at boot time, this is
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8ae1f5b19669..682de754d63f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -172,6 +172,7 @@ config GPIO_MSM_V2
172config GPIO_MVEBU 172config GPIO_MVEBU
173 def_bool y 173 def_bool y
174 depends on PLAT_ORION 174 depends on PLAT_ORION
175 depends on OF
175 select GPIO_GENERIC 176 select GPIO_GENERIC
176 select GENERIC_IRQ_CHIP 177 select GENERIC_IRQ_CHIP
177 178
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 6cc87ac8e019..6f2306db8591 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -390,6 +390,7 @@ static int ichx_gpio_probe(struct platform_device *pdev)
390 return -ENODEV; 390 return -ENODEV;
391 } 391 }
392 392
393 spin_lock_init(&ichx_priv.lock);
393 res_base = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPIO); 394 res_base = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPIO);
394 ichx_priv.use_gpio = ich_info->use_gpio; 395 ichx_priv.use_gpio = ich_info->use_gpio;
395 err = ichx_gpio_request_regions(res_base, pdev->name, 396 err = ichx_gpio_request_regions(res_base, pdev->name,
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index d767b534c4af..7d9bd94be8d2 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -41,7 +41,6 @@
41#include <linux/io.h> 41#include <linux/io.h>
42#include <linux/of_irq.h> 42#include <linux/of_irq.h>
43#include <linux/of_device.h> 43#include <linux/of_device.h>
44#include <linux/platform_device.h>
45#include <linux/pinctrl/consumer.h> 44#include <linux/pinctrl/consumer.h>
46 45
47/* 46/*
@@ -469,19 +468,6 @@ static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
469 } 468 }
470} 469}
471 470
472static struct platform_device_id mvebu_gpio_ids[] = {
473 {
474 .name = "orion-gpio",
475 }, {
476 .name = "mv78200-gpio",
477 }, {
478 .name = "armadaxp-gpio",
479 }, {
480 /* sentinel */
481 },
482};
483MODULE_DEVICE_TABLE(platform, mvebu_gpio_ids);
484
485static struct of_device_id mvebu_gpio_of_match[] = { 471static struct of_device_id mvebu_gpio_of_match[] = {
486 { 472 {
487 .compatible = "marvell,orion-gpio", 473 .compatible = "marvell,orion-gpio",
@@ -555,9 +541,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
555 mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK; 541 mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
556 mvchip->chip.ngpio = ngpios; 542 mvchip->chip.ngpio = ngpios;
557 mvchip->chip.can_sleep = 0; 543 mvchip->chip.can_sleep = 0;
558#ifdef CONFIG_OF
559 mvchip->chip.of_node = np; 544 mvchip->chip.of_node = np;
560#endif
561 545
562 spin_lock_init(&mvchip->lock); 546 spin_lock_init(&mvchip->lock);
563 mvchip->membase = devm_request_and_ioremap(&pdev->dev, res); 547 mvchip->membase = devm_request_and_ioremap(&pdev->dev, res);
@@ -698,7 +682,6 @@ static struct platform_driver mvebu_gpio_driver = {
698 .of_match_table = mvebu_gpio_of_match, 682 .of_match_table = mvebu_gpio_of_match,
699 }, 683 },
700 .probe = mvebu_gpio_probe, 684 .probe = mvebu_gpio_probe,
701 .id_table = mvebu_gpio_ids,
702}; 685};
703 686
704static int __init mvebu_gpio_init(void) 687static int __init mvebu_gpio_init(void)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a9151337d5b9..33d20be87db5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -579,7 +579,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
579 * at this point the buffer should be dead, so 579 * at this point the buffer should be dead, so
580 * no new sync objects can be attached. 580 * no new sync objects can be attached.
581 */ 581 */
582 sync_obj = driver->sync_obj_ref(&bo->sync_obj); 582 sync_obj = driver->sync_obj_ref(bo->sync_obj);
583 spin_unlock(&bdev->fence_lock); 583 spin_unlock(&bdev->fence_lock);
584 584
585 atomic_set(&bo->reserved, 0); 585 atomic_set(&bo->reserved, 0);
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 149d44a7c584..6c6d440bb2dd 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -130,7 +130,7 @@ static int twl4030_madc_hwmon_remove(struct platform_device *pdev)
130 130
131static struct platform_driver twl4030_madc_hwmon_driver = { 131static struct platform_driver twl4030_madc_hwmon_driver = {
132 .probe = twl4030_madc_hwmon_probe, 132 .probe = twl4030_madc_hwmon_probe,
133 .remove = __exit_p(twl4030_madc_hwmon_remove), 133 .remove = twl4030_madc_hwmon_remove,
134 .driver = { 134 .driver = {
135 .name = "twl4030_madc_hwmon", 135 .name = "twl4030_madc_hwmon",
136 .owner = THIS_MODULE, 136 .owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c7bff51fe524..bdca5111eb9d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -337,6 +337,16 @@ config I2C_BLACKFIN_TWI_CLK_KHZ
337 help 337 help
338 The unit of the TWI clock is kHz. 338 The unit of the TWI clock is kHz.
339 339
340config I2C_CBUS_GPIO
341 tristate "CBUS I2C driver"
342 depends on GENERIC_GPIO
343 help
344 Support for CBUS access using I2C API. Mostly relevant for Nokia
345 Internet Tablets (770, N800 and N810).
346
347 This driver can also be built as a module. If so, the module
348 will be called i2c-cbus-gpio.
349
340config I2C_CPM 350config I2C_CPM
341 tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)" 351 tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
342 depends on (CPM1 || CPM2) && OF_I2C 352 depends on (CPM1 || CPM2) && OF_I2C
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index e5cb209d276c..6181f3ff263f 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
31obj-$(CONFIG_I2C_AT91) += i2c-at91.o 31obj-$(CONFIG_I2C_AT91) += i2c-at91.o
32obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o 32obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
33obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o 33obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
34obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o
34obj-$(CONFIG_I2C_CPM) += i2c-cpm.o 35obj-$(CONFIG_I2C_CPM) += i2c-cpm.o
35obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o 36obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o
36obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o 37obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index c02bf208084f..b4575ee4bdf3 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -19,6 +19,8 @@
19 19
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/completion.h> 21#include <linux/completion.h>
22#include <linux/dma-mapping.h>
23#include <linux/dmaengine.h>
22#include <linux/err.h> 24#include <linux/err.h>
23#include <linux/i2c.h> 25#include <linux/i2c.h>
24#include <linux/interrupt.h> 26#include <linux/interrupt.h>
@@ -29,9 +31,11 @@
29#include <linux/of_i2c.h> 31#include <linux/of_i2c.h>
30#include <linux/platform_device.h> 32#include <linux/platform_device.h>
31#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/platform_data/dma-atmel.h>
32 35
33#define TWI_CLK_HZ 100000 /* max 400 Kbits/s */ 36#define TWI_CLK_HZ 100000 /* max 400 Kbits/s */
34#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */ 37#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
38#define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
35 39
36/* AT91 TWI register definitions */ 40/* AT91 TWI register definitions */
37#define AT91_TWI_CR 0x0000 /* Control Register */ 41#define AT91_TWI_CR 0x0000 /* Control Register */
@@ -66,24 +70,39 @@
66#define AT91_TWI_THR 0x0034 /* Transmit Holding Register */ 70#define AT91_TWI_THR 0x0034 /* Transmit Holding Register */
67 71
68struct at91_twi_pdata { 72struct at91_twi_pdata {
69 unsigned clk_max_div; 73 unsigned clk_max_div;
70 unsigned clk_offset; 74 unsigned clk_offset;
71 bool has_unre_flag; 75 bool has_unre_flag;
76 bool has_dma_support;
77 struct at_dma_slave dma_slave;
78};
79
80struct at91_twi_dma {
81 struct dma_chan *chan_rx;
82 struct dma_chan *chan_tx;
83 struct scatterlist sg;
84 struct dma_async_tx_descriptor *data_desc;
85 enum dma_data_direction direction;
86 bool buf_mapped;
87 bool xfer_in_progress;
72}; 88};
73 89
74struct at91_twi_dev { 90struct at91_twi_dev {
75 struct device *dev; 91 struct device *dev;
76 void __iomem *base; 92 void __iomem *base;
77 struct completion cmd_complete; 93 struct completion cmd_complete;
78 struct clk *clk; 94 struct clk *clk;
79 u8 *buf; 95 u8 *buf;
80 size_t buf_len; 96 size_t buf_len;
81 struct i2c_msg *msg; 97 struct i2c_msg *msg;
82 int irq; 98 int irq;
83 unsigned transfer_status; 99 unsigned imr;
84 struct i2c_adapter adapter; 100 unsigned transfer_status;
85 unsigned twi_cwgr_reg; 101 struct i2c_adapter adapter;
86 struct at91_twi_pdata *pdata; 102 unsigned twi_cwgr_reg;
103 struct at91_twi_pdata *pdata;
104 bool use_dma;
105 struct at91_twi_dma dma;
87}; 106};
88 107
89static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg) 108static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
@@ -102,6 +121,17 @@ static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
102 AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY); 121 AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
103} 122}
104 123
124static void at91_twi_irq_save(struct at91_twi_dev *dev)
125{
126 dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7;
127 at91_disable_twi_interrupts(dev);
128}
129
130static void at91_twi_irq_restore(struct at91_twi_dev *dev)
131{
132 at91_twi_write(dev, AT91_TWI_IER, dev->imr);
133}
134
105static void at91_init_twi_bus(struct at91_twi_dev *dev) 135static void at91_init_twi_bus(struct at91_twi_dev *dev)
106{ 136{
107 at91_disable_twi_interrupts(dev); 137 at91_disable_twi_interrupts(dev);
@@ -138,6 +168,28 @@ static void __devinit at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
138 dev_dbg(dev->dev, "cdiv %d ckdiv %d\n", cdiv, ckdiv); 168 dev_dbg(dev->dev, "cdiv %d ckdiv %d\n", cdiv, ckdiv);
139} 169}
140 170
171static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
172{
173 struct at91_twi_dma *dma = &dev->dma;
174
175 at91_twi_irq_save(dev);
176
177 if (dma->xfer_in_progress) {
178 if (dma->direction == DMA_FROM_DEVICE)
179 dmaengine_terminate_all(dma->chan_rx);
180 else
181 dmaengine_terminate_all(dma->chan_tx);
182 dma->xfer_in_progress = false;
183 }
184 if (dma->buf_mapped) {
185 dma_unmap_single(dev->dev, sg_dma_address(&dma->sg),
186 dev->buf_len, dma->direction);
187 dma->buf_mapped = false;
188 }
189
190 at91_twi_irq_restore(dev);
191}
192
141static void at91_twi_write_next_byte(struct at91_twi_dev *dev) 193static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
142{ 194{
143 if (dev->buf_len <= 0) 195 if (dev->buf_len <= 0)
@@ -154,6 +206,60 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
154 ++dev->buf; 206 ++dev->buf;
155} 207}
156 208
209static void at91_twi_write_data_dma_callback(void *data)
210{
211 struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
212
213 dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
214 dev->buf_len, DMA_MEM_TO_DEV);
215
216 at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
217}
218
219static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
220{
221 dma_addr_t dma_addr;
222 struct dma_async_tx_descriptor *txdesc;
223 struct at91_twi_dma *dma = &dev->dma;
224 struct dma_chan *chan_tx = dma->chan_tx;
225
226 if (dev->buf_len <= 0)
227 return;
228
229 dma->direction = DMA_TO_DEVICE;
230
231 at91_twi_irq_save(dev);
232 dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
233 DMA_TO_DEVICE);
234 if (dma_mapping_error(dev->dev, dma_addr)) {
235 dev_err(dev->dev, "dma map failed\n");
236 return;
237 }
238 dma->buf_mapped = true;
239 at91_twi_irq_restore(dev);
240 sg_dma_len(&dma->sg) = dev->buf_len;
241 sg_dma_address(&dma->sg) = dma_addr;
242
243 txdesc = dmaengine_prep_slave_sg(chan_tx, &dma->sg, 1, DMA_MEM_TO_DEV,
244 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
245 if (!txdesc) {
246 dev_err(dev->dev, "dma prep slave sg failed\n");
247 goto error;
248 }
249
250 txdesc->callback = at91_twi_write_data_dma_callback;
251 txdesc->callback_param = dev;
252
253 dma->xfer_in_progress = true;
254 dmaengine_submit(txdesc);
255 dma_async_issue_pending(chan_tx);
256
257 return;
258
259error:
260 at91_twi_dma_cleanup(dev);
261}
262
157static void at91_twi_read_next_byte(struct at91_twi_dev *dev) 263static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
158{ 264{
159 if (dev->buf_len <= 0) 265 if (dev->buf_len <= 0)
@@ -179,6 +285,61 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
179 ++dev->buf; 285 ++dev->buf;
180} 286}
181 287
288static void at91_twi_read_data_dma_callback(void *data)
289{
290 struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
291
292 dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
293 dev->buf_len, DMA_DEV_TO_MEM);
294
295 /* The last two bytes have to be read without using dma */
296 dev->buf += dev->buf_len - 2;
297 dev->buf_len = 2;
298 at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY);
299}
300
301static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
302{
303 dma_addr_t dma_addr;
304 struct dma_async_tx_descriptor *rxdesc;
305 struct at91_twi_dma *dma = &dev->dma;
306 struct dma_chan *chan_rx = dma->chan_rx;
307
308 dma->direction = DMA_FROM_DEVICE;
309
310 /* Keep in mind that we won't use dma to read the last two bytes */
311 at91_twi_irq_save(dev);
312 dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len - 2,
313 DMA_FROM_DEVICE);
314 if (dma_mapping_error(dev->dev, dma_addr)) {
315 dev_err(dev->dev, "dma map failed\n");
316 return;
317 }
318 dma->buf_mapped = true;
319 at91_twi_irq_restore(dev);
320 dma->sg.dma_address = dma_addr;
321 sg_dma_len(&dma->sg) = dev->buf_len - 2;
322
323 rxdesc = dmaengine_prep_slave_sg(chan_rx, &dma->sg, 1, DMA_DEV_TO_MEM,
324 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
325 if (!rxdesc) {
326 dev_err(dev->dev, "dma prep slave sg failed\n");
327 goto error;
328 }
329
330 rxdesc->callback = at91_twi_read_data_dma_callback;
331 rxdesc->callback_param = dev;
332
333 dma->xfer_in_progress = true;
334 dmaengine_submit(rxdesc);
335 dma_async_issue_pending(dma->chan_rx);
336
337 return;
338
339error:
340 at91_twi_dma_cleanup(dev);
341}
342
182static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id) 343static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
183{ 344{
184 struct at91_twi_dev *dev = dev_id; 345 struct at91_twi_dev *dev = dev_id;
@@ -229,12 +390,36 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
229 if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN)) 390 if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
230 start_flags |= AT91_TWI_STOP; 391 start_flags |= AT91_TWI_STOP;
231 at91_twi_write(dev, AT91_TWI_CR, start_flags); 392 at91_twi_write(dev, AT91_TWI_CR, start_flags);
232 at91_twi_write(dev, AT91_TWI_IER, 393 /*
394 * When using dma, the last byte has to be read manually in
395 * order to not send the stop command too late and then
396 * to receive extra data. In practice, there are some issues
397 * if you use the dma to read n-1 bytes because of latency.
398 * Reading n-2 bytes with dma and the two last ones manually
399 * seems to be the best solution.
400 */
401 if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
402 at91_twi_read_data_dma(dev);
403 /*
404 * It is important to enable TXCOMP irq here because
405 * doing it only when transferring the last two bytes
406 * will mask NACK errors since TXCOMP is set when a
407 * NACK occurs.
408 */
409 at91_twi_write(dev, AT91_TWI_IER,
410 AT91_TWI_TXCOMP);
411 } else
412 at91_twi_write(dev, AT91_TWI_IER,
233 AT91_TWI_TXCOMP | AT91_TWI_RXRDY); 413 AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
234 } else { 414 } else {
235 at91_twi_write_next_byte(dev); 415 if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
236 at91_twi_write(dev, AT91_TWI_IER, 416 at91_twi_write_data_dma(dev);
237 AT91_TWI_TXCOMP | AT91_TWI_TXRDY); 417 at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
418 } else {
419 at91_twi_write_next_byte(dev);
420 at91_twi_write(dev, AT91_TWI_IER,
421 AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
422 }
238 } 423 }
239 424
240 ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, 425 ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
@@ -242,23 +427,31 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
242 if (ret == 0) { 427 if (ret == 0) {
243 dev_err(dev->dev, "controller timed out\n"); 428 dev_err(dev->dev, "controller timed out\n");
244 at91_init_twi_bus(dev); 429 at91_init_twi_bus(dev);
245 return -ETIMEDOUT; 430 ret = -ETIMEDOUT;
431 goto error;
246 } 432 }
247 if (dev->transfer_status & AT91_TWI_NACK) { 433 if (dev->transfer_status & AT91_TWI_NACK) {
248 dev_dbg(dev->dev, "received nack\n"); 434 dev_dbg(dev->dev, "received nack\n");
249 return -EREMOTEIO; 435 ret = -EREMOTEIO;
436 goto error;
250 } 437 }
251 if (dev->transfer_status & AT91_TWI_OVRE) { 438 if (dev->transfer_status & AT91_TWI_OVRE) {
252 dev_err(dev->dev, "overrun while reading\n"); 439 dev_err(dev->dev, "overrun while reading\n");
253 return -EIO; 440 ret = -EIO;
441 goto error;
254 } 442 }
255 if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) { 443 if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
256 dev_err(dev->dev, "underrun while writing\n"); 444 dev_err(dev->dev, "underrun while writing\n");
257 return -EIO; 445 ret = -EIO;
446 goto error;
258 } 447 }
259 dev_dbg(dev->dev, "transfer complete\n"); 448 dev_dbg(dev->dev, "transfer complete\n");
260 449
261 return 0; 450 return 0;
451
452error:
453 at91_twi_dma_cleanup(dev);
454 return ret;
262} 455}
263 456
264static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) 457static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
@@ -329,36 +522,42 @@ static struct at91_twi_pdata at91rm9200_config = {
329 .clk_max_div = 5, 522 .clk_max_div = 5,
330 .clk_offset = 3, 523 .clk_offset = 3,
331 .has_unre_flag = true, 524 .has_unre_flag = true,
525 .has_dma_support = false,
332}; 526};
333 527
334static struct at91_twi_pdata at91sam9261_config = { 528static struct at91_twi_pdata at91sam9261_config = {
335 .clk_max_div = 5, 529 .clk_max_div = 5,
336 .clk_offset = 4, 530 .clk_offset = 4,
337 .has_unre_flag = false, 531 .has_unre_flag = false,
532 .has_dma_support = false,
338}; 533};
339 534
340static struct at91_twi_pdata at91sam9260_config = { 535static struct at91_twi_pdata at91sam9260_config = {
341 .clk_max_div = 7, 536 .clk_max_div = 7,
342 .clk_offset = 4, 537 .clk_offset = 4,
343 .has_unre_flag = false, 538 .has_unre_flag = false,
539 .has_dma_support = false,
344}; 540};
345 541
346static struct at91_twi_pdata at91sam9g20_config = { 542static struct at91_twi_pdata at91sam9g20_config = {
347 .clk_max_div = 7, 543 .clk_max_div = 7,
348 .clk_offset = 4, 544 .clk_offset = 4,
349 .has_unre_flag = false, 545 .has_unre_flag = false,
546 .has_dma_support = false,
350}; 547};
351 548
352static struct at91_twi_pdata at91sam9g10_config = { 549static struct at91_twi_pdata at91sam9g10_config = {
353 .clk_max_div = 7, 550 .clk_max_div = 7,
354 .clk_offset = 4, 551 .clk_offset = 4,
355 .has_unre_flag = false, 552 .has_unre_flag = false,
553 .has_dma_support = false,
356}; 554};
357 555
358static struct at91_twi_pdata at91sam9x5_config = { 556static struct at91_twi_pdata at91sam9x5_config = {
359 .clk_max_div = 7, 557 .clk_max_div = 7,
360 .clk_offset = 4, 558 .clk_offset = 4,
361 .has_unre_flag = false, 559 .has_unre_flag = false,
560 .has_dma_support = true,
362}; 561};
363 562
364static const struct platform_device_id at91_twi_devtypes[] = { 563static const struct platform_device_id at91_twi_devtypes[] = {
@@ -405,6 +604,90 @@ MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
405#define atmel_twi_dt_ids NULL 604#define atmel_twi_dt_ids NULL
406#endif 605#endif
407 606
607static bool __devinit filter(struct dma_chan *chan, void *slave)
608{
609 struct at_dma_slave *sl = slave;
610
611 if (sl->dma_dev == chan->device->dev) {
612 chan->private = sl;
613 return true;
614 } else {
615 return false;
616 }
617}
618
619static int __devinit at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
620{
621 int ret = 0;
622 struct at_dma_slave *sdata;
623 struct dma_slave_config slave_config;
624 struct at91_twi_dma *dma = &dev->dma;
625
626 sdata = &dev->pdata->dma_slave;
627
628 memset(&slave_config, 0, sizeof(slave_config));
629 slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
630 slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
631 slave_config.src_maxburst = 1;
632 slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
633 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
634 slave_config.dst_maxburst = 1;
635 slave_config.device_fc = false;
636
637 if (sdata && sdata->dma_dev) {
638 dma_cap_mask_t mask;
639
640 dma_cap_zero(mask);
641 dma_cap_set(DMA_SLAVE, mask);
642 dma->chan_tx = dma_request_channel(mask, filter, sdata);
643 if (!dma->chan_tx) {
644 dev_err(dev->dev, "no DMA channel available for tx\n");
645 ret = -EBUSY;
646 goto error;
647 }
648 dma->chan_rx = dma_request_channel(mask, filter, sdata);
649 if (!dma->chan_rx) {
650 dev_err(dev->dev, "no DMA channel available for rx\n");
651 ret = -EBUSY;
652 goto error;
653 }
654 } else {
655 ret = -EINVAL;
656 goto error;
657 }
658
659 slave_config.direction = DMA_MEM_TO_DEV;
660 if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
661 dev_err(dev->dev, "failed to configure tx channel\n");
662 ret = -EINVAL;
663 goto error;
664 }
665
666 slave_config.direction = DMA_DEV_TO_MEM;
667 if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
668 dev_err(dev->dev, "failed to configure rx channel\n");
669 ret = -EINVAL;
670 goto error;
671 }
672
673 sg_init_table(&dma->sg, 1);
674 dma->buf_mapped = false;
675 dma->xfer_in_progress = false;
676
677 dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
678 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
679
680 return ret;
681
682error:
683 dev_info(dev->dev, "can't use DMA\n");
684 if (dma->chan_rx)
685 dma_release_channel(dma->chan_rx);
686 if (dma->chan_tx)
687 dma_release_channel(dma->chan_tx);
688 return ret;
689}
690
408static struct at91_twi_pdata * __devinit at91_twi_get_driver_data( 691static struct at91_twi_pdata * __devinit at91_twi_get_driver_data(
409 struct platform_device *pdev) 692 struct platform_device *pdev)
410{ 693{
@@ -413,7 +696,7 @@ static struct at91_twi_pdata * __devinit at91_twi_get_driver_data(
413 match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node); 696 match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
414 if (!match) 697 if (!match)
415 return NULL; 698 return NULL;
416 return match->data; 699 return (struct at91_twi_pdata *)match->data;
417 } 700 }
418 return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data; 701 return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
419} 702}
@@ -423,6 +706,7 @@ static int __devinit at91_twi_probe(struct platform_device *pdev)
423 struct at91_twi_dev *dev; 706 struct at91_twi_dev *dev;
424 struct resource *mem; 707 struct resource *mem;
425 int rc; 708 int rc;
709 u32 phy_addr;
426 710
427 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); 711 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
428 if (!dev) 712 if (!dev)
@@ -433,6 +717,7 @@ static int __devinit at91_twi_probe(struct platform_device *pdev)
433 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 717 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
434 if (!mem) 718 if (!mem)
435 return -ENODEV; 719 return -ENODEV;
720 phy_addr = mem->start;
436 721
437 dev->pdata = at91_twi_get_driver_data(pdev); 722 dev->pdata = at91_twi_get_driver_data(pdev);
438 if (!dev->pdata) 723 if (!dev->pdata)
@@ -462,6 +747,11 @@ static int __devinit at91_twi_probe(struct platform_device *pdev)
462 } 747 }
463 clk_prepare_enable(dev->clk); 748 clk_prepare_enable(dev->clk);
464 749
750 if (dev->pdata->has_dma_support) {
751 if (at91_twi_configure_dma(dev, phy_addr) == 0)
752 dev->use_dma = true;
753 }
754
465 at91_calc_twi_clock(dev, TWI_CLK_HZ); 755 at91_calc_twi_clock(dev, TWI_CLK_HZ);
466 at91_init_twi_bus(dev); 756 at91_init_twi_bus(dev);
467 757
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
new file mode 100644
index 000000000000..98386d659318
--- /dev/null
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -0,0 +1,300 @@
1/*
2 * CBUS I2C driver for Nokia Internet Tablets.
3 *
4 * Copyright (C) 2004-2010 Nokia Corporation
5 *
6 * Based on code written by Juha Yrjölä, David Weinehall, Mikko Ylinen and
7 * Felipe Balbi. Converted to I2C driver by Aaro Koskinen.
8 *
9 * This file is subject to the terms and conditions of the GNU General
10 * Public License. See the file "COPYING" in the main directory of this
11 * archive for more details.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/io.h>
20#include <linux/i2c.h>
21#include <linux/gpio.h>
22#include <linux/init.h>
23#include <linux/slab.h>
24#include <linux/delay.h>
25#include <linux/errno.h>
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/of_gpio.h>
29#include <linux/interrupt.h>
30#include <linux/platform_device.h>
31#include <linux/platform_data/i2c-cbus-gpio.h>
32
33/*
34 * Bit counts are derived from Nokia implementation. These should be checked
35 * if other CBUS implementations appear.
36 */
37#define CBUS_ADDR_BITS 3
38#define CBUS_REG_BITS 5
39
40struct cbus_host {
41 spinlock_t lock; /* host lock */
42 struct device *dev;
43 int clk_gpio;
44 int dat_gpio;
45 int sel_gpio;
46};
47
48/**
49 * cbus_send_bit - sends one bit over the bus
50 * @host: the host we're using
51 * @bit: one bit of information to send
52 */
53static void cbus_send_bit(struct cbus_host *host, unsigned bit)
54{
55 gpio_set_value(host->dat_gpio, bit ? 1 : 0);
56 gpio_set_value(host->clk_gpio, 1);
57 gpio_set_value(host->clk_gpio, 0);
58}
59
60/**
61 * cbus_send_data - sends @len amount of data over the bus
62 * @host: the host we're using
63 * @data: the data to send
64 * @len: size of the transfer
65 */
66static void cbus_send_data(struct cbus_host *host, unsigned data, unsigned len)
67{
68 int i;
69
70 for (i = len; i > 0; i--)
71 cbus_send_bit(host, data & (1 << (i - 1)));
72}
73
74/**
75 * cbus_receive_bit - receives one bit from the bus
76 * @host: the host we're using
77 */
78static int cbus_receive_bit(struct cbus_host *host)
79{
80 int ret;
81
82 gpio_set_value(host->clk_gpio, 1);
83 ret = gpio_get_value(host->dat_gpio);
84 gpio_set_value(host->clk_gpio, 0);
85 return ret;
86}
87
88/**
89 * cbus_receive_word - receives 16-bit word from the bus
90 * @host: the host we're using
91 */
92static int cbus_receive_word(struct cbus_host *host)
93{
94 int ret = 0;
95 int i;
96
97 for (i = 16; i > 0; i--) {
98 int bit = cbus_receive_bit(host);
99
100 if (bit < 0)
101 return bit;
102
103 if (bit)
104 ret |= 1 << (i - 1);
105 }
106 return ret;
107}
108
109/**
110 * cbus_transfer - transfers data over the bus
111 * @host: the host we're using
112 * @rw: read/write flag
113 * @dev: device address
114 * @reg: register address
115 * @data: if @rw == I2C_SBUS_WRITE data to send otherwise 0
116 */
117static int cbus_transfer(struct cbus_host *host, char rw, unsigned dev,
118 unsigned reg, unsigned data)
119{
120 unsigned long flags;
121 int ret;
122
123 /* We don't want interrupts disturbing our transfer */
124 spin_lock_irqsave(&host->lock, flags);
125
126 /* Reset state and start of transfer, SEL stays down during transfer */
127 gpio_set_value(host->sel_gpio, 0);
128
129 /* Set the DAT pin to output */
130 gpio_direction_output(host->dat_gpio, 1);
131
132 /* Send the device address */
133 cbus_send_data(host, dev, CBUS_ADDR_BITS);
134
135 /* Send the rw flag */
136 cbus_send_bit(host, rw == I2C_SMBUS_READ);
137
138 /* Send the register address */
139 cbus_send_data(host, reg, CBUS_REG_BITS);
140
141 if (rw == I2C_SMBUS_WRITE) {
142 cbus_send_data(host, data, 16);
143 ret = 0;
144 } else {
145 ret = gpio_direction_input(host->dat_gpio);
146 if (ret) {
147 dev_dbg(host->dev, "failed setting direction\n");
148 goto out;
149 }
150 gpio_set_value(host->clk_gpio, 1);
151
152 ret = cbus_receive_word(host);
153 if (ret < 0) {
154 dev_dbg(host->dev, "failed receiving data\n");
155 goto out;
156 }
157 }
158
159 /* Indicate end of transfer, SEL goes up until next transfer */
160 gpio_set_value(host->sel_gpio, 1);
161 gpio_set_value(host->clk_gpio, 1);
162 gpio_set_value(host->clk_gpio, 0);
163
164out:
165 spin_unlock_irqrestore(&host->lock, flags);
166
167 return ret;
168}
169
170static int cbus_i2c_smbus_xfer(struct i2c_adapter *adapter,
171 u16 addr,
172 unsigned short flags,
173 char read_write,
174 u8 command,
175 int size,
176 union i2c_smbus_data *data)
177{
178 struct cbus_host *chost = i2c_get_adapdata(adapter);
179 int ret;
180
181 if (size != I2C_SMBUS_WORD_DATA)
182 return -EINVAL;
183
184 ret = cbus_transfer(chost, read_write == I2C_SMBUS_READ, addr,
185 command, data->word);
186 if (ret < 0)
187 return ret;
188
189 if (read_write == I2C_SMBUS_READ)
190 data->word = ret;
191
192 return 0;
193}
194
195static u32 cbus_i2c_func(struct i2c_adapter *adapter)
196{
197 return I2C_FUNC_SMBUS_READ_WORD_DATA | I2C_FUNC_SMBUS_WRITE_WORD_DATA;
198}
199
200static const struct i2c_algorithm cbus_i2c_algo = {
201 .smbus_xfer = cbus_i2c_smbus_xfer,
202 .functionality = cbus_i2c_func,
203};
204
205static int cbus_i2c_remove(struct platform_device *pdev)
206{
207 struct i2c_adapter *adapter = platform_get_drvdata(pdev);
208
209 return i2c_del_adapter(adapter);
210}
211
212static int cbus_i2c_probe(struct platform_device *pdev)
213{
214 struct i2c_adapter *adapter;
215 struct cbus_host *chost;
216 int ret;
217
218 adapter = devm_kzalloc(&pdev->dev, sizeof(struct i2c_adapter),
219 GFP_KERNEL);
220 if (!adapter)
221 return -ENOMEM;
222
223 chost = devm_kzalloc(&pdev->dev, sizeof(*chost), GFP_KERNEL);
224 if (!chost)
225 return -ENOMEM;
226
227 if (pdev->dev.of_node) {
228 struct device_node *dnode = pdev->dev.of_node;
229 if (of_gpio_count(dnode) != 3)
230 return -ENODEV;
231 chost->clk_gpio = of_get_gpio(dnode, 0);
232 chost->dat_gpio = of_get_gpio(dnode, 1);
233 chost->sel_gpio = of_get_gpio(dnode, 2);
234 } else if (pdev->dev.platform_data) {
235 struct i2c_cbus_platform_data *pdata = pdev->dev.platform_data;
236 chost->clk_gpio = pdata->clk_gpio;
237 chost->dat_gpio = pdata->dat_gpio;
238 chost->sel_gpio = pdata->sel_gpio;
239 } else {
240 return -ENODEV;
241 }
242
243 adapter->owner = THIS_MODULE;
244 adapter->class = I2C_CLASS_HWMON;
245 adapter->dev.parent = &pdev->dev;
246 adapter->nr = pdev->id;
247 adapter->timeout = HZ;
248 adapter->algo = &cbus_i2c_algo;
249 strlcpy(adapter->name, "CBUS I2C adapter", sizeof(adapter->name));
250
251 spin_lock_init(&chost->lock);
252 chost->dev = &pdev->dev;
253
254 ret = devm_gpio_request_one(&pdev->dev, chost->clk_gpio,
255 GPIOF_OUT_INIT_LOW, "CBUS clk");
256 if (ret)
257 return ret;
258
259 ret = devm_gpio_request_one(&pdev->dev, chost->dat_gpio, GPIOF_IN,
260 "CBUS data");
261 if (ret)
262 return ret;
263
264 ret = devm_gpio_request_one(&pdev->dev, chost->sel_gpio,
265 GPIOF_OUT_INIT_HIGH, "CBUS sel");
266 if (ret)
267 return ret;
268
269 i2c_set_adapdata(adapter, chost);
270 platform_set_drvdata(pdev, adapter);
271
272 return i2c_add_numbered_adapter(adapter);
273}
274
275#if defined(CONFIG_OF)
276static const struct of_device_id i2c_cbus_dt_ids[] = {
277 { .compatible = "i2c-cbus-gpio", },
278 { }
279};
280MODULE_DEVICE_TABLE(of, i2c_cbus_dt_ids);
281#endif
282
283static struct platform_driver cbus_i2c_driver = {
284 .probe = cbus_i2c_probe,
285 .remove = cbus_i2c_remove,
286 .driver = {
287 .owner = THIS_MODULE,
288 .name = "i2c-cbus-gpio",
289 },
290};
291module_platform_driver(cbus_i2c_driver);
292
293MODULE_ALIAS("platform:i2c-cbus-gpio");
294MODULE_DESCRIPTION("CBUS I2C driver");
295MODULE_AUTHOR("Juha Yrjölä");
296MODULE_AUTHOR("David Weinehall");
297MODULE_AUTHOR("Mikko Ylinen");
298MODULE_AUTHOR("Felipe Balbi");
299MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
300MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index e62d2d938628..257299a92df3 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -184,7 +184,11 @@ static int __devinit i2c_gpio_probe(struct platform_device *pdev)
184 bit_data->data = pdata; 184 bit_data->data = pdata;
185 185
186 adap->owner = THIS_MODULE; 186 adap->owner = THIS_MODULE;
187 snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id); 187 if (pdev->dev.of_node)
188 strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
189 else
190 snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
191
188 adap->algo_data = bit_data; 192 adap->algo_data = bit_data;
189 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 193 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
190 adap->dev.parent = &pdev->dev; 194 adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 0670da79ee5e..6ed53da9e1f4 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -359,7 +359,7 @@ static int mxs_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
359 359
360static u32 mxs_i2c_func(struct i2c_adapter *adap) 360static u32 mxs_i2c_func(struct i2c_adapter *adap)
361{ 361{
362 return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); 362 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
363} 363}
364 364
365static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id) 365static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 02c3115a2dfa..8b2ffcf45322 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -435,13 +435,6 @@ static int read_i2c(struct nmk_i2c_dev *dev, u16 flags)
435 timeout = wait_for_completion_timeout( 435 timeout = wait_for_completion_timeout(
436 &dev->xfer_complete, dev->adap.timeout); 436 &dev->xfer_complete, dev->adap.timeout);
437 437
438 if (timeout < 0) {
439 dev_err(&dev->adev->dev,
440 "wait_for_completion_timeout "
441 "returned %d waiting for event\n", timeout);
442 status = timeout;
443 }
444
445 if (timeout == 0) { 438 if (timeout == 0) {
446 /* Controller timed out */ 439 /* Controller timed out */
447 dev_err(&dev->adev->dev, "read from slave 0x%x timed out\n", 440 dev_err(&dev->adev->dev, "read from slave 0x%x timed out\n",
@@ -523,13 +516,6 @@ static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
523 timeout = wait_for_completion_timeout( 516 timeout = wait_for_completion_timeout(
524 &dev->xfer_complete, dev->adap.timeout); 517 &dev->xfer_complete, dev->adap.timeout);
525 518
526 if (timeout < 0) {
527 dev_err(&dev->adev->dev,
528 "wait_for_completion_timeout "
529 "returned %d waiting for event\n", timeout);
530 status = timeout;
531 }
532
533 if (timeout == 0) { 519 if (timeout == 0) {
534 /* Controller timed out */ 520 /* Controller timed out */
535 dev_err(&dev->adev->dev, "write to slave 0x%x timed out\n", 521 dev_err(&dev->adev->dev, "write to slave 0x%x timed out\n",
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 15da1ac7cf9e..9b35c9fbb2fe 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -4,6 +4,9 @@
4 * 4 *
5 * Peter Korsgaard <jacmet@sunsite.dk> 5 * Peter Korsgaard <jacmet@sunsite.dk>
6 * 6 *
7 * Support for the GRLIB port of the controller by
8 * Andreas Larsson <andreas@gaisler.com>
9 *
7 * This file is licensed under the terms of the GNU General Public License 10 * This file is licensed under the terms of the GNU General Public License
8 * version 2. This program is licensed "as is" without any warranty of any 11 * version 2. This program is licensed "as is" without any warranty of any
9 * kind, whether express or implied. 12 * kind, whether express or implied.
@@ -34,6 +37,8 @@ struct ocores_i2c {
34 int nmsgs; 37 int nmsgs;
35 int state; /* see STATE_ */ 38 int state; /* see STATE_ */
36 int clock_khz; 39 int clock_khz;
40 void (*setreg)(struct ocores_i2c *i2c, int reg, u8 value);
41 u8 (*getreg)(struct ocores_i2c *i2c, int reg);
37}; 42};
38 43
39/* registers */ 44/* registers */
@@ -67,24 +72,47 @@ struct ocores_i2c {
67#define STATE_READ 3 72#define STATE_READ 3
68#define STATE_ERROR 4 73#define STATE_ERROR 4
69 74
75#define TYPE_OCORES 0
76#define TYPE_GRLIB 1
77
78static void oc_setreg_8(struct ocores_i2c *i2c, int reg, u8 value)
79{
80 iowrite8(value, i2c->base + (reg << i2c->reg_shift));
81}
82
83static void oc_setreg_16(struct ocores_i2c *i2c, int reg, u8 value)
84{
85 iowrite16(value, i2c->base + (reg << i2c->reg_shift));
86}
87
88static void oc_setreg_32(struct ocores_i2c *i2c, int reg, u8 value)
89{
90 iowrite32(value, i2c->base + (reg << i2c->reg_shift));
91}
92
93static inline u8 oc_getreg_8(struct ocores_i2c *i2c, int reg)
94{
95 return ioread8(i2c->base + (reg << i2c->reg_shift));
96}
97
98static inline u8 oc_getreg_16(struct ocores_i2c *i2c, int reg)
99{
100 return ioread16(i2c->base + (reg << i2c->reg_shift));
101}
102
103static inline u8 oc_getreg_32(struct ocores_i2c *i2c, int reg)
104{
105 return ioread32(i2c->base + (reg << i2c->reg_shift));
106}
107
70static inline void oc_setreg(struct ocores_i2c *i2c, int reg, u8 value) 108static inline void oc_setreg(struct ocores_i2c *i2c, int reg, u8 value)
71{ 109{
72 if (i2c->reg_io_width == 4) 110 i2c->setreg(i2c, reg, value);
73 iowrite32(value, i2c->base + (reg << i2c->reg_shift));
74 else if (i2c->reg_io_width == 2)
75 iowrite16(value, i2c->base + (reg << i2c->reg_shift));
76 else
77 iowrite8(value, i2c->base + (reg << i2c->reg_shift));
78} 111}
79 112
80static inline u8 oc_getreg(struct ocores_i2c *i2c, int reg) 113static inline u8 oc_getreg(struct ocores_i2c *i2c, int reg)
81{ 114{
82 if (i2c->reg_io_width == 4) 115 return i2c->getreg(i2c, reg);
83 return ioread32(i2c->base + (reg << i2c->reg_shift));
84 else if (i2c->reg_io_width == 2)
85 return ioread16(i2c->base + (reg << i2c->reg_shift));
86 else
87 return ioread8(i2c->base + (reg << i2c->reg_shift));
88} 116}
89 117
90static void ocores_process(struct ocores_i2c *i2c) 118static void ocores_process(struct ocores_i2c *i2c)
@@ -223,11 +251,59 @@ static struct i2c_adapter ocores_adapter = {
223 .algo = &ocores_algorithm, 251 .algo = &ocores_algorithm,
224}; 252};
225 253
254static struct of_device_id ocores_i2c_match[] = {
255 {
256 .compatible = "opencores,i2c-ocores",
257 .data = (void *)TYPE_OCORES,
258 },
259 {
260 .compatible = "aeroflexgaisler,i2cmst",
261 .data = (void *)TYPE_GRLIB,
262 },
263 {},
264};
265MODULE_DEVICE_TABLE(of, ocores_i2c_match);
266
226#ifdef CONFIG_OF 267#ifdef CONFIG_OF
268/* Read and write functions for the GRLIB port of the controller. Registers are
269 * 32-bit big endian and the PRELOW and PREHIGH registers are merged into one
270 * register. The subsequent registers has their offset decreased accordingly. */
271static u8 oc_getreg_grlib(struct ocores_i2c *i2c, int reg)
272{
273 u32 rd;
274 int rreg = reg;
275 if (reg != OCI2C_PRELOW)
276 rreg--;
277 rd = ioread32be(i2c->base + (rreg << i2c->reg_shift));
278 if (reg == OCI2C_PREHIGH)
279 return (u8)(rd >> 8);
280 else
281 return (u8)rd;
282}
283
284static void oc_setreg_grlib(struct ocores_i2c *i2c, int reg, u8 value)
285{
286 u32 curr, wr;
287 int rreg = reg;
288 if (reg != OCI2C_PRELOW)
289 rreg--;
290 if (reg == OCI2C_PRELOW || reg == OCI2C_PREHIGH) {
291 curr = ioread32be(i2c->base + (rreg << i2c->reg_shift));
292 if (reg == OCI2C_PRELOW)
293 wr = (curr & 0xff00) | value;
294 else
295 wr = (((u32)value) << 8) | (curr & 0xff);
296 } else {
297 wr = value;
298 }
299 iowrite32be(wr, i2c->base + (rreg << i2c->reg_shift));
300}
301
227static int ocores_i2c_of_probe(struct platform_device *pdev, 302static int ocores_i2c_of_probe(struct platform_device *pdev,
228 struct ocores_i2c *i2c) 303 struct ocores_i2c *i2c)
229{ 304{
230 struct device_node *np = pdev->dev.of_node; 305 struct device_node *np = pdev->dev.of_node;
306 const struct of_device_id *match;
231 u32 val; 307 u32 val;
232 308
233 if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) { 309 if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) {
@@ -253,6 +329,14 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
253 329
254 of_property_read_u32(pdev->dev.of_node, "reg-io-width", 330 of_property_read_u32(pdev->dev.of_node, "reg-io-width",
255 &i2c->reg_io_width); 331 &i2c->reg_io_width);
332
333 match = of_match_node(ocores_i2c_match, pdev->dev.of_node);
334 if (match && (int)match->data == TYPE_GRLIB) {
335 dev_dbg(&pdev->dev, "GRLIB variant of i2c-ocores\n");
336 i2c->setreg = oc_setreg_grlib;
337 i2c->getreg = oc_getreg_grlib;
338 }
339
256 return 0; 340 return 0;
257} 341}
258#else 342#else
@@ -263,7 +347,8 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
263{ 347{
264 struct ocores_i2c *i2c; 348 struct ocores_i2c *i2c;
265 struct ocores_i2c_platform_data *pdata; 349 struct ocores_i2c_platform_data *pdata;
266 struct resource *res, *res2; 350 struct resource *res;
351 int irq;
267 int ret; 352 int ret;
268 int i; 353 int i;
269 354
@@ -271,26 +356,17 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
271 if (!res) 356 if (!res)
272 return -ENODEV; 357 return -ENODEV;
273 358
274 res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 359 irq = platform_get_irq(pdev, 0);
275 if (!res2) 360 if (irq < 0)
276 return -ENODEV; 361 return irq;
277 362
278 i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); 363 i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
279 if (!i2c) 364 if (!i2c)
280 return -ENOMEM; 365 return -ENOMEM;
281 366
282 if (!devm_request_mem_region(&pdev->dev, res->start, 367 i2c->base = devm_request_and_ioremap(&pdev->dev, res);
283 resource_size(res), pdev->name)) { 368 if (!i2c->base)
284 dev_err(&pdev->dev, "Memory region busy\n"); 369 return -EADDRNOTAVAIL;
285 return -EBUSY;
286 }
287
288 i2c->base = devm_ioremap_nocache(&pdev->dev, res->start,
289 resource_size(res));
290 if (!i2c->base) {
291 dev_err(&pdev->dev, "Unable to map registers\n");
292 return -EIO;
293 }
294 370
295 pdata = pdev->dev.platform_data; 371 pdata = pdev->dev.platform_data;
296 if (pdata) { 372 if (pdata) {
@@ -306,10 +382,34 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
306 if (i2c->reg_io_width == 0) 382 if (i2c->reg_io_width == 0)
307 i2c->reg_io_width = 1; /* Set to default value */ 383 i2c->reg_io_width = 1; /* Set to default value */
308 384
385 if (!i2c->setreg || !i2c->getreg) {
386 switch (i2c->reg_io_width) {
387 case 1:
388 i2c->setreg = oc_setreg_8;
389 i2c->getreg = oc_getreg_8;
390 break;
391
392 case 2:
393 i2c->setreg = oc_setreg_16;
394 i2c->getreg = oc_getreg_16;
395 break;
396
397 case 4:
398 i2c->setreg = oc_setreg_32;
399 i2c->getreg = oc_getreg_32;
400 break;
401
402 default:
403 dev_err(&pdev->dev, "Unsupported I/O width (%d)\n",
404 i2c->reg_io_width);
405 return -EINVAL;
406 }
407 }
408
309 ocores_init(i2c); 409 ocores_init(i2c);
310 410
311 init_waitqueue_head(&i2c->wait); 411 init_waitqueue_head(&i2c->wait);
312 ret = devm_request_irq(&pdev->dev, res2->start, ocores_isr, 0, 412 ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
313 pdev->name, i2c); 413 pdev->name, i2c);
314 if (ret) { 414 if (ret) {
315 dev_err(&pdev->dev, "Cannot claim IRQ\n"); 415 dev_err(&pdev->dev, "Cannot claim IRQ\n");
@@ -383,12 +483,6 @@ static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume);
383#define OCORES_I2C_PM NULL 483#define OCORES_I2C_PM NULL
384#endif 484#endif
385 485
386static struct of_device_id ocores_i2c_match[] = {
387 { .compatible = "opencores,i2c-ocores", },
388 {},
389};
390MODULE_DEVICE_TABLE(of, ocores_i2c_match);
391
392static struct platform_driver ocores_i2c_driver = { 486static struct platform_driver ocores_i2c_driver = {
393 .probe = ocores_i2c_probe, 487 .probe = ocores_i2c_probe,
394 .remove = __devexit_p(ocores_i2c_remove), 488 .remove = __devexit_p(ocores_i2c_remove),
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 3525c9e62cb0..7a62acb7d262 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -43,14 +43,16 @@
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <linux/i2c-omap.h> 44#include <linux/i2c-omap.h>
45#include <linux/pm_runtime.h> 45#include <linux/pm_runtime.h>
46#include <linux/pinctrl/consumer.h>
46 47
47/* I2C controller revisions */ 48/* I2C controller revisions */
48#define OMAP_I2C_OMAP1_REV_2 0x20 49#define OMAP_I2C_OMAP1_REV_2 0x20
49 50
50/* I2C controller revisions present on specific hardware */ 51/* I2C controller revisions present on specific hardware */
51#define OMAP_I2C_REV_ON_2430 0x36 52#define OMAP_I2C_REV_ON_2430 0x00000036
52#define OMAP_I2C_REV_ON_3430_3530 0x3C 53#define OMAP_I2C_REV_ON_3430_3530 0x0000003C
53#define OMAP_I2C_REV_ON_3630_4430 0x40 54#define OMAP_I2C_REV_ON_3630 0x00000040
55#define OMAP_I2C_REV_ON_4430_PLUS 0x50400002
54 56
55/* timeout waiting for the controller to respond */ 57/* timeout waiting for the controller to respond */
56#define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000)) 58#define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000))
@@ -190,7 +192,6 @@ struct omap_i2c_dev {
190 void (*set_mpu_wkup_lat)(struct device *dev, 192 void (*set_mpu_wkup_lat)(struct device *dev,
191 long latency); 193 long latency);
192 u32 speed; /* Speed of bus in kHz */ 194 u32 speed; /* Speed of bus in kHz */
193 u32 dtrev; /* extra revision from DT */
194 u32 flags; 195 u32 flags;
195 u16 cmd_err; 196 u16 cmd_err;
196 u8 *buf; 197 u8 *buf;
@@ -202,17 +203,18 @@ struct omap_i2c_dev {
202 * fifo_size==0 implies no fifo 203 * fifo_size==0 implies no fifo
203 * if set, should be trsh+1 204 * if set, should be trsh+1
204 */ 205 */
205 u8 rev; 206 u32 rev;
206 unsigned b_hw:1; /* bad h/w fixes */ 207 unsigned b_hw:1; /* bad h/w fixes */
207 unsigned receiver:1; /* true when we're in receiver mode */ 208 unsigned receiver:1; /* true when we're in receiver mode */
208 u16 iestate; /* Saved interrupt register */ 209 u16 iestate; /* Saved interrupt register */
209 u16 pscstate; 210 u16 pscstate;
210 u16 scllstate; 211 u16 scllstate;
211 u16 sclhstate; 212 u16 sclhstate;
212 u16 bufstate;
213 u16 syscstate; 213 u16 syscstate;
214 u16 westate; 214 u16 westate;
215 u16 errata; 215 u16 errata;
216
217 struct pinctrl *pins;
216}; 218};
217 219
218static const u8 reg_map_ip_v1[] = { 220static const u8 reg_map_ip_v1[] = {
@@ -275,16 +277,39 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
275 (i2c_dev->regs[reg] << i2c_dev->reg_shift)); 277 (i2c_dev->regs[reg] << i2c_dev->reg_shift));
276} 278}
277 279
278static int omap_i2c_init(struct omap_i2c_dev *dev) 280static void __omap_i2c_init(struct omap_i2c_dev *dev)
281{
282
283 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
284
285 /* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
286 omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
287
288 /* SCL low and high time values */
289 omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
290 omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, dev->sclhstate);
291 if (dev->rev >= OMAP_I2C_REV_ON_3430_3530)
292 omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
293
294 /* Take the I2C module out of reset: */
295 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
296
297 /*
298 * Don't write to this register if the IE state is 0 as it can
299 * cause deadlock.
300 */
301 if (dev->iestate)
302 omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
303}
304
305static int omap_i2c_reset(struct omap_i2c_dev *dev)
279{ 306{
280 u16 psc = 0, scll = 0, sclh = 0, buf = 0;
281 u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;
282 unsigned long fclk_rate = 12000000;
283 unsigned long timeout; 307 unsigned long timeout;
284 unsigned long internal_clk = 0; 308 u16 sysc;
285 struct clk *fclk;
286 309
287 if (dev->rev >= OMAP_I2C_OMAP1_REV_2) { 310 if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
311 sysc = omap_i2c_read_reg(dev, OMAP_I2C_SYSC_REG);
312
288 /* Disable I2C controller before soft reset */ 313 /* Disable I2C controller before soft reset */
289 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 314 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
290 omap_i2c_read_reg(dev, OMAP_I2C_CON_REG) & 315 omap_i2c_read_reg(dev, OMAP_I2C_CON_REG) &
@@ -306,32 +331,28 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
306 } 331 }
307 332
308 /* SYSC register is cleared by the reset; rewrite it */ 333 /* SYSC register is cleared by the reset; rewrite it */
309 if (dev->rev == OMAP_I2C_REV_ON_2430) { 334 omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, sysc);
310 335
311 omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, 336 }
312 SYSC_AUTOIDLE_MASK); 337 return 0;
313 338}
314 } else if (dev->rev >= OMAP_I2C_REV_ON_3430_3530) { 339
315 dev->syscstate = SYSC_AUTOIDLE_MASK; 340static int omap_i2c_init(struct omap_i2c_dev *dev)
316 dev->syscstate |= SYSC_ENAWAKEUP_MASK; 341{
317 dev->syscstate |= (SYSC_IDLEMODE_SMART << 342 u16 psc = 0, scll = 0, sclh = 0;
318 __ffs(SYSC_SIDLEMODE_MASK)); 343 u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;
319 dev->syscstate |= (SYSC_CLOCKACTIVITY_FCLK << 344 unsigned long fclk_rate = 12000000;
320 __ffs(SYSC_CLOCKACTIVITY_MASK)); 345 unsigned long internal_clk = 0;
321 346 struct clk *fclk;
322 omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, 347
323 dev->syscstate); 348 if (dev->rev >= OMAP_I2C_REV_ON_3430_3530) {
324 /* 349 /*
325 * Enabling all wakup sources to stop I2C freezing on 350 * Enabling all wakup sources to stop I2C freezing on
326 * WFI instruction. 351 * WFI instruction.
327 * REVISIT: Some wkup sources might not be needed. 352 * REVISIT: Some wkup sources might not be needed.
328 */ 353 */
329 dev->westate = OMAP_I2C_WE_ALL; 354 dev->westate = OMAP_I2C_WE_ALL;
330 omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
331 dev->westate);
332 }
333 } 355 }
334 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
335 356
336 if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) { 357 if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
337 /* 358 /*
@@ -416,28 +437,17 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
416 sclh = fclk_rate / (dev->speed * 2) - 7 + psc; 437 sclh = fclk_rate / (dev->speed * 2) - 7 + psc;
417 } 438 }
418 439
419 /* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
420 omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, psc);
421
422 /* SCL low and high time values */
423 omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, scll);
424 omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, sclh);
425
426 /* Take the I2C module out of reset: */
427 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
428
429 /* Enable interrupts */
430 dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY | 440 dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
431 OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK | 441 OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK |
432 OMAP_I2C_IE_AL) | ((dev->fifo_size) ? 442 OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
433 (OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0); 443 (OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
434 omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate); 444
435 if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) { 445 dev->pscstate = psc;
436 dev->pscstate = psc; 446 dev->scllstate = scll;
437 dev->scllstate = scll; 447 dev->sclhstate = sclh;
438 dev->sclhstate = sclh; 448
439 dev->bufstate = buf; 449 __omap_i2c_init(dev);
440 } 450
441 return 0; 451 return 0;
442} 452}
443 453
@@ -490,7 +500,7 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx)
490 500
491 omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf); 501 omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf);
492 502
493 if (dev->rev < OMAP_I2C_REV_ON_3630_4430) 503 if (dev->rev < OMAP_I2C_REV_ON_3630)
494 dev->b_hw = 1; /* Enable hardware fixes */ 504 dev->b_hw = 1; /* Enable hardware fixes */
495 505
496 /* calculate wakeup latency constraint for MPU */ 506 /* calculate wakeup latency constraint for MPU */
@@ -586,7 +596,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
586 OMAP_I2C_TIMEOUT); 596 OMAP_I2C_TIMEOUT);
587 if (timeout == 0) { 597 if (timeout == 0) {
588 dev_err(dev->dev, "controller timed out\n"); 598 dev_err(dev->dev, "controller timed out\n");
589 omap_i2c_init(dev); 599 omap_i2c_reset(dev);
600 __omap_i2c_init(dev);
590 return -ETIMEDOUT; 601 return -ETIMEDOUT;
591 } 602 }
592 603
@@ -596,7 +607,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
596 /* We have an error */ 607 /* We have an error */
597 if (dev->cmd_err & (OMAP_I2C_STAT_AL | OMAP_I2C_STAT_ROVR | 608 if (dev->cmd_err & (OMAP_I2C_STAT_AL | OMAP_I2C_STAT_ROVR |
598 OMAP_I2C_STAT_XUDF)) { 609 OMAP_I2C_STAT_XUDF)) {
599 omap_i2c_init(dev); 610 omap_i2c_reset(dev);
611 __omap_i2c_init(dev);
600 return -EIO; 612 return -EIO;
601 } 613 }
602 614
@@ -642,13 +654,14 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
642 break; 654 break;
643 } 655 }
644 656
645 if (dev->set_mpu_wkup_lat != NULL)
646 dev->set_mpu_wkup_lat(dev->dev, -1);
647
648 if (r == 0) 657 if (r == 0)
649 r = num; 658 r = num;
650 659
651 omap_i2c_wait_for_bb(dev); 660 omap_i2c_wait_for_bb(dev);
661
662 if (dev->set_mpu_wkup_lat != NULL)
663 dev->set_mpu_wkup_lat(dev->dev, -1);
664
652out: 665out:
653 pm_runtime_mark_last_busy(dev->dev); 666 pm_runtime_mark_last_busy(dev->dev);
654 pm_runtime_put_autosuspend(dev->dev); 667 pm_runtime_put_autosuspend(dev->dev);
@@ -1025,9 +1038,7 @@ static const struct i2c_algorithm omap_i2c_algo = {
1025#ifdef CONFIG_OF 1038#ifdef CONFIG_OF
1026static struct omap_i2c_bus_platform_data omap3_pdata = { 1039static struct omap_i2c_bus_platform_data omap3_pdata = {
1027 .rev = OMAP_I2C_IP_VERSION_1, 1040 .rev = OMAP_I2C_IP_VERSION_1,
1028 .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | 1041 .flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
1029 OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
1030 OMAP_I2C_FLAG_BUS_SHIFT_2,
1031}; 1042};
1032 1043
1033static struct omap_i2c_bus_platform_data omap4_pdata = { 1044static struct omap_i2c_bus_platform_data omap4_pdata = {
@@ -1048,6 +1059,16 @@ static const struct of_device_id omap_i2c_of_match[] = {
1048MODULE_DEVICE_TABLE(of, omap_i2c_of_match); 1059MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
1049#endif 1060#endif
1050 1061
1062#define OMAP_I2C_SCHEME(rev) ((rev & 0xc000) >> 14)
1063
1064#define OMAP_I2C_REV_SCHEME_0_MAJOR(rev) (rev >> 4)
1065#define OMAP_I2C_REV_SCHEME_0_MINOR(rev) (rev & 0xf)
1066
1067#define OMAP_I2C_REV_SCHEME_1_MAJOR(rev) ((rev & 0x0700) >> 7)
1068#define OMAP_I2C_REV_SCHEME_1_MINOR(rev) (rev & 0x1f)
1069#define OMAP_I2C_SCHEME_0 0
1070#define OMAP_I2C_SCHEME_1 1
1071
1051static int __devinit 1072static int __devinit
1052omap_i2c_probe(struct platform_device *pdev) 1073omap_i2c_probe(struct platform_device *pdev)
1053{ 1074{
@@ -1060,6 +1081,8 @@ omap_i2c_probe(struct platform_device *pdev)
1060 const struct of_device_id *match; 1081 const struct of_device_id *match;
1061 int irq; 1082 int irq;
1062 int r; 1083 int r;
1084 u32 rev;
1085 u16 minor, major, scheme;
1063 1086
1064 /* NOTE: driver uses the static register mapping */ 1087 /* NOTE: driver uses the static register mapping */
1065 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1088 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1091,7 +1114,6 @@ omap_i2c_probe(struct platform_device *pdev)
1091 u32 freq = 100000; /* default to 100000 Hz */ 1114 u32 freq = 100000; /* default to 100000 Hz */
1092 1115
1093 pdata = match->data; 1116 pdata = match->data;
1094 dev->dtrev = pdata->rev;
1095 dev->flags = pdata->flags; 1117 dev->flags = pdata->flags;
1096 1118
1097 of_property_read_u32(node, "clock-frequency", &freq); 1119 of_property_read_u32(node, "clock-frequency", &freq);
@@ -1101,7 +1123,16 @@ omap_i2c_probe(struct platform_device *pdev)
1101 dev->speed = pdata->clkrate; 1123 dev->speed = pdata->clkrate;
1102 dev->flags = pdata->flags; 1124 dev->flags = pdata->flags;
1103 dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat; 1125 dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
1104 dev->dtrev = pdata->rev; 1126 }
1127
1128 dev->pins = devm_pinctrl_get_select_default(&pdev->dev);
1129 if (IS_ERR(dev->pins)) {
1130 if (PTR_ERR(dev->pins) == -EPROBE_DEFER)
1131 return -EPROBE_DEFER;
1132
1133 dev_warn(&pdev->dev, "did not get pins for i2c error: %li\n",
1134 PTR_ERR(dev->pins));
1135 dev->pins = NULL;
1105 } 1136 }
1106 1137
1107 dev->dev = &pdev->dev; 1138 dev->dev = &pdev->dev;
@@ -1114,11 +1145,6 @@ omap_i2c_probe(struct platform_device *pdev)
1114 1145
1115 dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3; 1146 dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
1116 1147
1117 if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
1118 dev->regs = (u8 *)reg_map_ip_v2;
1119 else
1120 dev->regs = (u8 *)reg_map_ip_v1;
1121
1122 pm_runtime_enable(dev->dev); 1148 pm_runtime_enable(dev->dev);
1123 pm_runtime_set_autosuspend_delay(dev->dev, OMAP_I2C_PM_TIMEOUT); 1149 pm_runtime_set_autosuspend_delay(dev->dev, OMAP_I2C_PM_TIMEOUT);
1124 pm_runtime_use_autosuspend(dev->dev); 1150 pm_runtime_use_autosuspend(dev->dev);
@@ -1127,11 +1153,37 @@ omap_i2c_probe(struct platform_device *pdev)
1127 if (IS_ERR_VALUE(r)) 1153 if (IS_ERR_VALUE(r))
1128 goto err_free_mem; 1154 goto err_free_mem;
1129 1155
1130 dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff; 1156 /*
1157 * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2.
1158 * On omap1/3/2 Offset 4 is IE Reg the bit [15:14] is 0 at reset.
1159 * Also since the omap_i2c_read_reg uses reg_map_ip_* a
1160 * raw_readw is done.
1161 */
1162 rev = __raw_readw(dev->base + 0x04);
1163
1164 scheme = OMAP_I2C_SCHEME(rev);
1165 switch (scheme) {
1166 case OMAP_I2C_SCHEME_0:
1167 dev->regs = (u8 *)reg_map_ip_v1;
1168 dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG);
1169 minor = OMAP_I2C_REV_SCHEME_0_MAJOR(dev->rev);
1170 major = OMAP_I2C_REV_SCHEME_0_MAJOR(dev->rev);
1171 break;
1172 case OMAP_I2C_SCHEME_1:
1173 /* FALLTHROUGH */
1174 default:
1175 dev->regs = (u8 *)reg_map_ip_v2;
1176 rev = (rev << 16) |
1177 omap_i2c_read_reg(dev, OMAP_I2C_IP_V2_REVNB_LO);
1178 minor = OMAP_I2C_REV_SCHEME_1_MINOR(rev);
1179 major = OMAP_I2C_REV_SCHEME_1_MAJOR(rev);
1180 dev->rev = rev;
1181 }
1131 1182
1132 dev->errata = 0; 1183 dev->errata = 0;
1133 1184
1134 if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207) 1185 if (dev->rev >= OMAP_I2C_REV_ON_2430 &&
1186 dev->rev < OMAP_I2C_REV_ON_4430_PLUS)
1135 dev->errata |= I2C_OMAP_ERRATA_I207; 1187 dev->errata |= I2C_OMAP_ERRATA_I207;
1136 1188
1137 if (dev->rev <= OMAP_I2C_REV_ON_3430_3530) 1189 if (dev->rev <= OMAP_I2C_REV_ON_3430_3530)
@@ -1152,7 +1204,7 @@ omap_i2c_probe(struct platform_device *pdev)
1152 1204
1153 dev->fifo_size = (dev->fifo_size / 2); 1205 dev->fifo_size = (dev->fifo_size / 2);
1154 1206
1155 if (dev->rev < OMAP_I2C_REV_ON_3630_4430) 1207 if (dev->rev < OMAP_I2C_REV_ON_3630)
1156 dev->b_hw = 1; /* Enable hardware fixes */ 1208 dev->b_hw = 1; /* Enable hardware fixes */
1157 1209
1158 /* calculate wakeup latency constraint for MPU */ 1210 /* calculate wakeup latency constraint for MPU */
@@ -1195,8 +1247,8 @@ omap_i2c_probe(struct platform_device *pdev)
1195 goto err_unuse_clocks; 1247 goto err_unuse_clocks;
1196 } 1248 }
1197 1249
1198 dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", adap->nr, 1250 dev_info(dev->dev, "bus %d rev%d.%d at %d kHz\n", adap->nr,
1199 dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed); 1251 major, minor, dev->speed);
1200 1252
1201 of_i2c_register_devices(adap); 1253 of_i2c_register_devices(adap);
1202 1254
@@ -1239,14 +1291,13 @@ static int omap_i2c_runtime_suspend(struct device *dev)
1239{ 1291{
1240 struct platform_device *pdev = to_platform_device(dev); 1292 struct platform_device *pdev = to_platform_device(dev);
1241 struct omap_i2c_dev *_dev = platform_get_drvdata(pdev); 1293 struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
1242 u16 iv;
1243 1294
1244 _dev->iestate = omap_i2c_read_reg(_dev, OMAP_I2C_IE_REG); 1295 _dev->iestate = omap_i2c_read_reg(_dev, OMAP_I2C_IE_REG);
1245 1296
1246 omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, 0); 1297 omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, 0);
1247 1298
1248 if (_dev->rev < OMAP_I2C_OMAP1_REV_2) { 1299 if (_dev->rev < OMAP_I2C_OMAP1_REV_2) {
1249 iv = omap_i2c_read_reg(_dev, OMAP_I2C_IV_REG); /* Read clears */ 1300 omap_i2c_read_reg(_dev, OMAP_I2C_IV_REG); /* Read clears */
1250 } else { 1301 } else {
1251 omap_i2c_write_reg(_dev, OMAP_I2C_STAT_REG, _dev->iestate); 1302 omap_i2c_write_reg(_dev, OMAP_I2C_STAT_REG, _dev->iestate);
1252 1303
@@ -1262,23 +1313,10 @@ static int omap_i2c_runtime_resume(struct device *dev)
1262 struct platform_device *pdev = to_platform_device(dev); 1313 struct platform_device *pdev = to_platform_device(dev);
1263 struct omap_i2c_dev *_dev = platform_get_drvdata(pdev); 1314 struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
1264 1315
1265 if (_dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) { 1316 if (!_dev->regs)
1266 omap_i2c_write_reg(_dev, OMAP_I2C_CON_REG, 0); 1317 return 0;
1267 omap_i2c_write_reg(_dev, OMAP_I2C_PSC_REG, _dev->pscstate);
1268 omap_i2c_write_reg(_dev, OMAP_I2C_SCLL_REG, _dev->scllstate);
1269 omap_i2c_write_reg(_dev, OMAP_I2C_SCLH_REG, _dev->sclhstate);
1270 omap_i2c_write_reg(_dev, OMAP_I2C_BUF_REG, _dev->bufstate);
1271 omap_i2c_write_reg(_dev, OMAP_I2C_SYSC_REG, _dev->syscstate);
1272 omap_i2c_write_reg(_dev, OMAP_I2C_WE_REG, _dev->westate);
1273 omap_i2c_write_reg(_dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
1274 }
1275 1318
1276 /* 1319 __omap_i2c_init(_dev);
1277 * Don't write to this register if the IE state is 0 as it can
1278 * cause deadlock.
1279 */
1280 if (_dev->iestate)
1281 omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, _dev->iestate);
1282 1320
1283 return 0; 1321 return 0;
1284} 1322}
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index f9399d163af2..72a8071a5556 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -642,7 +642,7 @@ static int __devinit rcar_i2c_probe(struct platform_device *pdev)
642 if (ret < 0) 642 if (ret < 0)
643 return ret; 643 return ret;
644 644
645 priv->io = devm_ioremap(dev, res->start, resource_size(res)); 645 priv->io = devm_request_and_ioremap(dev, res);
646 if (!priv->io) { 646 if (!priv->io) {
647 dev_err(dev, "cannot ioremap\n"); 647 dev_err(dev, "cannot ioremap\n");
648 return -ENODEV; 648 return -ENODEV;
@@ -693,7 +693,7 @@ static int __devexit rcar_i2c_remove(struct platform_device *pdev)
693 return 0; 693 return 0;
694} 694}
695 695
696static struct platform_driver rcar_i2c_drv = { 696static struct platform_driver rcar_i2c_driver = {
697 .driver = { 697 .driver = {
698 .name = "i2c-rcar", 698 .name = "i2c-rcar",
699 .owner = THIS_MODULE, 699 .owner = THIS_MODULE,
@@ -702,7 +702,7 @@ static struct platform_driver rcar_i2c_drv = {
702 .remove = __devexit_p(rcar_i2c_remove), 702 .remove = __devexit_p(rcar_i2c_remove),
703}; 703};
704 704
705module_platform_driver(rcar_i2c_drv); 705module_platform_driver(rcar_i2c_driver);
706 706
707MODULE_LICENSE("GPL"); 707MODULE_LICENSE("GPL");
708MODULE_DESCRIPTION("Renesas R-Car I2C bus driver"); 708MODULE_DESCRIPTION("Renesas R-Car I2C bus driver");
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index b33d95ebc890..a290d089ceaf 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -38,6 +38,7 @@
38#include <linux/io.h> 38#include <linux/io.h>
39#include <linux/of_i2c.h> 39#include <linux/of_i2c.h>
40#include <linux/of_gpio.h> 40#include <linux/of_gpio.h>
41#include <linux/pinctrl/consumer.h>
41 42
42#include <asm/irq.h> 43#include <asm/irq.h>
43 44
@@ -49,6 +50,9 @@
49#define QUIRK_HDMIPHY (1 << 1) 50#define QUIRK_HDMIPHY (1 << 1)
50#define QUIRK_NO_GPIO (1 << 2) 51#define QUIRK_NO_GPIO (1 << 2)
51 52
53/* Max time to wait for bus to become idle after a xfer (in us) */
54#define S3C2410_IDLE_TIMEOUT 5000
55
52/* i2c controller state */ 56/* i2c controller state */
53enum s3c24xx_i2c_state { 57enum s3c24xx_i2c_state {
54 STATE_IDLE, 58 STATE_IDLE,
@@ -59,7 +63,6 @@ enum s3c24xx_i2c_state {
59}; 63};
60 64
61struct s3c24xx_i2c { 65struct s3c24xx_i2c {
62 spinlock_t lock;
63 wait_queue_head_t wait; 66 wait_queue_head_t wait;
64 unsigned int quirks; 67 unsigned int quirks;
65 unsigned int suspended:1; 68 unsigned int suspended:1;
@@ -78,11 +81,11 @@ struct s3c24xx_i2c {
78 void __iomem *regs; 81 void __iomem *regs;
79 struct clk *clk; 82 struct clk *clk;
80 struct device *dev; 83 struct device *dev;
81 struct resource *ioarea;
82 struct i2c_adapter adap; 84 struct i2c_adapter adap;
83 85
84 struct s3c2410_platform_i2c *pdata; 86 struct s3c2410_platform_i2c *pdata;
85 int gpios[2]; 87 int gpios[2];
88 struct pinctrl *pctrl;
86#ifdef CONFIG_CPU_FREQ 89#ifdef CONFIG_CPU_FREQ
87 struct notifier_block freq_transition; 90 struct notifier_block freq_transition;
88#endif 91#endif
@@ -235,8 +238,47 @@ static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
235 238
236 dev_dbg(i2c->dev, "STOP\n"); 239 dev_dbg(i2c->dev, "STOP\n");
237 240
238 /* stop the transfer */ 241 /*
239 iicstat &= ~S3C2410_IICSTAT_START; 242 * The datasheet says that the STOP sequence should be:
243 * 1) I2CSTAT.5 = 0 - Clear BUSY (or 'generate STOP')
244 * 2) I2CCON.4 = 0 - Clear IRQPEND
245 * 3) Wait until the stop condition takes effect.
246 * 4*) I2CSTAT.4 = 0 - Clear TXRXEN
247 *
248 * Where, step "4*" is only for buses with the "HDMIPHY" quirk.
249 *
250 * However, after much experimentation, it appears that:
251 * a) normal buses automatically clear BUSY and transition from
252 * Master->Slave when they complete generating a STOP condition.
253 * Therefore, step (3) can be done in doxfer() by polling I2CCON.4
254 * after starting the STOP generation here.
255 * b) HDMIPHY bus does neither, so there is no way to do step 3.
256 * There is no indication when this bus has finished generating
257 * STOP.
258 *
259 * In fact, we have found that as soon as the IRQPEND bit is cleared in
260 * step 2, the HDMIPHY bus generates the STOP condition, and then
261 * immediately starts transferring another data byte, even though the
262 * bus is supposedly stopped. This is presumably because the bus is
263 * still in "Master" mode, and its BUSY bit is still set.
264 *
265 * To avoid these extra post-STOP transactions on HDMI phy devices, we
266 * just disable Serial Output on the bus (I2CSTAT.4 = 0) directly,
267 * instead of first generating a proper STOP condition. This should
268 * float SDA & SCK terminating the transfer. Subsequent transfers
269 * start with a proper START condition, and proceed normally.
270 *
271 * The HDMIPHY bus is an internal bus that always has exactly two
272 * devices, the host as Master and the HDMIPHY device as the slave.
273 * Skipping the STOP condition has been tested on this bus and works.
274 */
275 if (i2c->quirks & QUIRK_HDMIPHY) {
276 /* Stop driving the I2C pins */
277 iicstat &= ~S3C2410_IICSTAT_TXRXEN;
278 } else {
279 /* stop the transfer */
280 iicstat &= ~S3C2410_IICSTAT_START;
281 }
240 writel(iicstat, i2c->regs + S3C2410_IICSTAT); 282 writel(iicstat, i2c->regs + S3C2410_IICSTAT);
241 283
242 i2c->state = STATE_STOP; 284 i2c->state = STATE_STOP;
@@ -490,13 +532,6 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
490 unsigned long iicstat; 532 unsigned long iicstat;
491 int timeout = 400; 533 int timeout = 400;
492 534
493 /* the timeout for HDMIPHY is reduced to 10 ms because
494 * the hangup is expected to happen, so waiting 400 ms
495 * causes only unnecessary system hangup
496 */
497 if (i2c->quirks & QUIRK_HDMIPHY)
498 timeout = 10;
499
500 while (timeout-- > 0) { 535 while (timeout-- > 0) {
501 iicstat = readl(i2c->regs + S3C2410_IICSTAT); 536 iicstat = readl(i2c->regs + S3C2410_IICSTAT);
502 537
@@ -506,16 +541,61 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
506 msleep(1); 541 msleep(1);
507 } 542 }
508 543
509 /* hang-up of bus dedicated for HDMIPHY occurred, resetting */ 544 return -ETIMEDOUT;
510 if (i2c->quirks & QUIRK_HDMIPHY) { 545}
511 writel(0, i2c->regs + S3C2410_IICCON);
512 writel(0, i2c->regs + S3C2410_IICSTAT);
513 writel(0, i2c->regs + S3C2410_IICDS);
514 546
515 return 0; 547/* s3c24xx_i2c_wait_idle
548 *
549 * wait for the i2c bus to become idle.
550*/
551
552static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
553{
554 unsigned long iicstat;
555 ktime_t start, now;
556 unsigned long delay;
557 int spins;
558
559 /* ensure the stop has been through the bus */
560
561 dev_dbg(i2c->dev, "waiting for bus idle\n");
562
563 start = now = ktime_get();
564
565 /*
566 * Most of the time, the bus is already idle within a few usec of the
567 * end of a transaction. However, really slow i2c devices can stretch
568 * the clock, delaying STOP generation.
569 *
570 * On slower SoCs this typically happens within a very small number of
571 * instructions so busy wait briefly to avoid scheduling overhead.
572 */
573 spins = 3;
574 iicstat = readl(i2c->regs + S3C2410_IICSTAT);
575 while ((iicstat & S3C2410_IICSTAT_START) && --spins) {
576 cpu_relax();
577 iicstat = readl(i2c->regs + S3C2410_IICSTAT);
516 } 578 }
517 579
518 return -ETIMEDOUT; 580 /*
581 * If we do get an appreciable delay as a compromise between idle
582 * detection latency for the normal, fast case, and system load in the
583 * slow device case, use an exponential back off in the polling loop,
584 * up to 1/10th of the total timeout, then continue to poll at a
585 * constant rate up to the timeout.
586 */
587 delay = 1;
588 while ((iicstat & S3C2410_IICSTAT_START) &&
589 ktime_us_delta(now, start) < S3C2410_IDLE_TIMEOUT) {
590 usleep_range(delay, 2 * delay);
591 if (delay < S3C2410_IDLE_TIMEOUT / 10)
592 delay <<= 1;
593 now = ktime_get();
594 iicstat = readl(i2c->regs + S3C2410_IICSTAT);
595 }
596
597 if (iicstat & S3C2410_IICSTAT_START)
598 dev_warn(i2c->dev, "timeout waiting for bus idle\n");
519} 599}
520 600
521/* s3c24xx_i2c_doxfer 601/* s3c24xx_i2c_doxfer
@@ -526,8 +606,7 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
526static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c, 606static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
527 struct i2c_msg *msgs, int num) 607 struct i2c_msg *msgs, int num)
528{ 608{
529 unsigned long iicstat, timeout; 609 unsigned long timeout;
530 int spins = 20;
531 int ret; 610 int ret;
532 611
533 if (i2c->suspended) 612 if (i2c->suspended)
@@ -540,8 +619,6 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
540 goto out; 619 goto out;
541 } 620 }
542 621
543 spin_lock_irq(&i2c->lock);
544
545 i2c->msg = msgs; 622 i2c->msg = msgs;
546 i2c->msg_num = num; 623 i2c->msg_num = num;
547 i2c->msg_ptr = 0; 624 i2c->msg_ptr = 0;
@@ -550,7 +627,6 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
550 627
551 s3c24xx_i2c_enable_irq(i2c); 628 s3c24xx_i2c_enable_irq(i2c);
552 s3c24xx_i2c_message_start(i2c, msgs); 629 s3c24xx_i2c_message_start(i2c, msgs);
553 spin_unlock_irq(&i2c->lock);
554 630
555 timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5); 631 timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
556 632
@@ -564,24 +640,11 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
564 else if (ret != num) 640 else if (ret != num)
565 dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret); 641 dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
566 642
567 /* ensure the stop has been through the bus */ 643 /* For QUIRK_HDMIPHY, bus is already disabled */
568 644 if (i2c->quirks & QUIRK_HDMIPHY)
569 dev_dbg(i2c->dev, "waiting for bus idle\n"); 645 goto out;
570
571 /* first, try busy waiting briefly */
572 do {
573 cpu_relax();
574 iicstat = readl(i2c->regs + S3C2410_IICSTAT);
575 } while ((iicstat & S3C2410_IICSTAT_START) && --spins);
576
577 /* if that timed out sleep */
578 if (!spins) {
579 msleep(1);
580 iicstat = readl(i2c->regs + S3C2410_IICSTAT);
581 }
582 646
583 if (iicstat & S3C2410_IICSTAT_START) 647 s3c24xx_i2c_wait_idle(i2c);
584 dev_warn(i2c->dev, "timeout waiting for bus idle\n");
585 648
586 out: 649 out:
587 return ret; 650 return ret;
@@ -740,7 +803,6 @@ static int s3c24xx_i2c_cpufreq_transition(struct notifier_block *nb,
740 unsigned long val, void *data) 803 unsigned long val, void *data)
741{ 804{
742 struct s3c24xx_i2c *i2c = freq_to_i2c(nb); 805 struct s3c24xx_i2c *i2c = freq_to_i2c(nb);
743 unsigned long flags;
744 unsigned int got; 806 unsigned int got;
745 int delta_f; 807 int delta_f;
746 int ret; 808 int ret;
@@ -754,9 +816,9 @@ static int s3c24xx_i2c_cpufreq_transition(struct notifier_block *nb,
754 816
755 if ((val == CPUFREQ_POSTCHANGE && delta_f < 0) || 817 if ((val == CPUFREQ_POSTCHANGE && delta_f < 0) ||
756 (val == CPUFREQ_PRECHANGE && delta_f > 0)) { 818 (val == CPUFREQ_PRECHANGE && delta_f > 0)) {
757 spin_lock_irqsave(&i2c->lock, flags); 819 i2c_lock_adapter(&i2c->adap);
758 ret = s3c24xx_i2c_clockrate(i2c, &got); 820 ret = s3c24xx_i2c_clockrate(i2c, &got);
759 spin_unlock_irqrestore(&i2c->lock, flags); 821 i2c_unlock_adapter(&i2c->adap);
760 822
761 if (ret < 0) 823 if (ret < 0)
762 dev_err(i2c->dev, "cannot find frequency\n"); 824 dev_err(i2c->dev, "cannot find frequency\n");
@@ -858,14 +920,6 @@ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
858 920
859 pdata = i2c->pdata; 921 pdata = i2c->pdata;
860 922
861 /* inititalise the gpio */
862
863 if (pdata->cfg_gpio)
864 pdata->cfg_gpio(to_platform_device(i2c->dev));
865 else
866 if (s3c24xx_i2c_parse_dt_gpio(i2c))
867 return -EINVAL;
868
869 /* write slave address */ 923 /* write slave address */
870 924
871 writeb(pdata->slave_addr, i2c->regs + S3C2410_IICADD); 925 writeb(pdata->slave_addr, i2c->regs + S3C2410_IICADD);
@@ -963,7 +1017,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
963 i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 1017 i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
964 i2c->tx_setup = 50; 1018 i2c->tx_setup = 50;
965 1019
966 spin_lock_init(&i2c->lock);
967 init_waitqueue_head(&i2c->wait); 1020 init_waitqueue_head(&i2c->wait);
968 1021
969 /* find the clock and enable it */ 1022 /* find the clock and enable it */
@@ -989,36 +1042,38 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
989 goto err_clk; 1042 goto err_clk;
990 } 1043 }
991 1044
992 i2c->ioarea = request_mem_region(res->start, resource_size(res), 1045 i2c->regs = devm_request_and_ioremap(&pdev->dev, res);
993 pdev->name);
994
995 if (i2c->ioarea == NULL) {
996 dev_err(&pdev->dev, "cannot request IO\n");
997 ret = -ENXIO;
998 goto err_clk;
999 }
1000
1001 i2c->regs = ioremap(res->start, resource_size(res));
1002 1046
1003 if (i2c->regs == NULL) { 1047 if (i2c->regs == NULL) {
1004 dev_err(&pdev->dev, "cannot map IO\n"); 1048 dev_err(&pdev->dev, "cannot map IO\n");
1005 ret = -ENXIO; 1049 ret = -ENXIO;
1006 goto err_ioarea; 1050 goto err_clk;
1007 } 1051 }
1008 1052
1009 dev_dbg(&pdev->dev, "registers %p (%p, %p)\n", 1053 dev_dbg(&pdev->dev, "registers %p (%p)\n",
1010 i2c->regs, i2c->ioarea, res); 1054 i2c->regs, res);
1011 1055
1012 /* setup info block for the i2c core */ 1056 /* setup info block for the i2c core */
1013 1057
1014 i2c->adap.algo_data = i2c; 1058 i2c->adap.algo_data = i2c;
1015 i2c->adap.dev.parent = &pdev->dev; 1059 i2c->adap.dev.parent = &pdev->dev;
1016 1060
1061 i2c->pctrl = devm_pinctrl_get_select_default(i2c->dev);
1062
1063 /* inititalise the i2c gpio lines */
1064
1065 if (i2c->pdata->cfg_gpio) {
1066 i2c->pdata->cfg_gpio(to_platform_device(i2c->dev));
1067 } else if (IS_ERR(i2c->pctrl) && s3c24xx_i2c_parse_dt_gpio(i2c)) {
1068 ret = -EINVAL;
1069 goto err_clk;
1070 }
1071
1017 /* initialise the i2c controller */ 1072 /* initialise the i2c controller */
1018 1073
1019 ret = s3c24xx_i2c_init(i2c); 1074 ret = s3c24xx_i2c_init(i2c);
1020 if (ret != 0) 1075 if (ret != 0)
1021 goto err_iomap; 1076 goto err_clk;
1022 1077
1023 /* find the IRQ for this unit (note, this relies on the init call to 1078 /* find the IRQ for this unit (note, this relies on the init call to
1024 * ensure no current IRQs pending 1079 * ensure no current IRQs pending
@@ -1027,7 +1082,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1027 i2c->irq = ret = platform_get_irq(pdev, 0); 1082 i2c->irq = ret = platform_get_irq(pdev, 0);
1028 if (ret <= 0) { 1083 if (ret <= 0) {
1029 dev_err(&pdev->dev, "cannot find IRQ\n"); 1084 dev_err(&pdev->dev, "cannot find IRQ\n");
1030 goto err_iomap; 1085 goto err_clk;
1031 } 1086 }
1032 1087
1033 ret = request_irq(i2c->irq, s3c24xx_i2c_irq, 0, 1088 ret = request_irq(i2c->irq, s3c24xx_i2c_irq, 0,
@@ -1035,7 +1090,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1035 1090
1036 if (ret != 0) { 1091 if (ret != 0) {
1037 dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq); 1092 dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
1038 goto err_iomap; 1093 goto err_clk;
1039 } 1094 }
1040 1095
1041 ret = s3c24xx_i2c_register_cpufreq(i2c); 1096 ret = s3c24xx_i2c_register_cpufreq(i2c);
@@ -1075,13 +1130,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1075 err_irq: 1130 err_irq:
1076 free_irq(i2c->irq, i2c); 1131 free_irq(i2c->irq, i2c);
1077 1132
1078 err_iomap:
1079 iounmap(i2c->regs);
1080
1081 err_ioarea:
1082 release_resource(i2c->ioarea);
1083 kfree(i2c->ioarea);
1084
1085 err_clk: 1133 err_clk:
1086 clk_disable_unprepare(i2c->clk); 1134 clk_disable_unprepare(i2c->clk);
1087 clk_put(i2c->clk); 1135 clk_put(i2c->clk);
@@ -1110,16 +1158,13 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
1110 clk_disable_unprepare(i2c->clk); 1158 clk_disable_unprepare(i2c->clk);
1111 clk_put(i2c->clk); 1159 clk_put(i2c->clk);
1112 1160
1113 iounmap(i2c->regs); 1161 if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
1114 1162 s3c24xx_i2c_dt_gpio_free(i2c);
1115 release_resource(i2c->ioarea);
1116 s3c24xx_i2c_dt_gpio_free(i2c);
1117 kfree(i2c->ioarea);
1118 1163
1119 return 0; 1164 return 0;
1120} 1165}
1121 1166
1122#ifdef CONFIG_PM 1167#ifdef CONFIG_PM_SLEEP
1123static int s3c24xx_i2c_suspend_noirq(struct device *dev) 1168static int s3c24xx_i2c_suspend_noirq(struct device *dev)
1124{ 1169{
1125 struct platform_device *pdev = to_platform_device(dev); 1170 struct platform_device *pdev = to_platform_device(dev);
@@ -1142,10 +1187,14 @@ static int s3c24xx_i2c_resume(struct device *dev)
1142 1187
1143 return 0; 1188 return 0;
1144} 1189}
1190#endif
1145 1191
1192#ifdef CONFIG_PM
1146static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = { 1193static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
1194#ifdef CONFIG_PM_SLEEP
1147 .suspend_noirq = s3c24xx_i2c_suspend_noirq, 1195 .suspend_noirq = s3c24xx_i2c_suspend_noirq,
1148 .resume = s3c24xx_i2c_resume, 1196 .resume = s3c24xx_i2c_resume,
1197#endif
1149}; 1198};
1150 1199
1151#define S3C24XX_DEV_PM_OPS (&s3c24xx_i2c_dev_pm_ops) 1200#define S3C24XX_DEV_PM_OPS (&s3c24xx_i2c_dev_pm_ops)
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 8110ca45f342..9411c1b892c0 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -120,11 +120,12 @@ struct sh_mobile_i2c_data {
120 void __iomem *reg; 120 void __iomem *reg;
121 struct i2c_adapter adap; 121 struct i2c_adapter adap;
122 unsigned long bus_speed; 122 unsigned long bus_speed;
123 unsigned int clks_per_count;
123 struct clk *clk; 124 struct clk *clk;
124 u_int8_t icic; 125 u_int8_t icic;
125 u_int8_t iccl;
126 u_int8_t icch;
127 u_int8_t flags; 126 u_int8_t flags;
127 u_int16_t iccl;
128 u_int16_t icch;
128 129
129 spinlock_t lock; 130 spinlock_t lock;
130 wait_queue_head_t wait; 131 wait_queue_head_t wait;
@@ -135,7 +136,8 @@ struct sh_mobile_i2c_data {
135 136
136#define IIC_FLAG_HAS_ICIC67 (1 << 0) 137#define IIC_FLAG_HAS_ICIC67 (1 << 0)
137 138
138#define NORMAL_SPEED 100000 /* FAST_SPEED 400000 */ 139#define STANDARD_MODE 100000
140#define FAST_MODE 400000
139 141
140/* Register offsets */ 142/* Register offsets */
141#define ICDR 0x00 143#define ICDR 0x00
@@ -187,57 +189,90 @@ static void iic_set_clr(struct sh_mobile_i2c_data *pd, int offs,
187 iic_wr(pd, offs, (iic_rd(pd, offs) | set) & ~clr); 189 iic_wr(pd, offs, (iic_rd(pd, offs) | set) & ~clr);
188} 190}
189 191
190static void activate_ch(struct sh_mobile_i2c_data *pd) 192static u32 sh_mobile_i2c_iccl(unsigned long count_khz, u32 tLOW, u32 tf, int offset)
191{ 193{
192 unsigned long i2c_clk; 194 /*
193 u_int32_t num; 195 * Conditional expression:
194 u_int32_t denom; 196 * ICCL >= COUNT_CLK * (tLOW + tf)
195 u_int32_t tmp; 197 *
196 198 * SH-Mobile IIC hardware starts counting the LOW period of
197 /* Wake up device and enable clock */ 199 * the SCL signal (tLOW) as soon as it pulls the SCL line.
198 pm_runtime_get_sync(pd->dev); 200 * In order to meet the tLOW timing spec, we need to take into
199 clk_enable(pd->clk); 201 * account the fall time of SCL signal (tf). Default tf value
200 202 * should be 0.3 us, for safety.
201 /* Get clock rate after clock is enabled */ 203 */
202 i2c_clk = clk_get_rate(pd->clk); 204 return (((count_khz * (tLOW + tf)) + 5000) / 10000) + offset;
205}
203 206
204 /* Calculate the value for iccl. From the data sheet: 207static u32 sh_mobile_i2c_icch(unsigned long count_khz, u32 tHIGH, u32 tf, int offset)
205 * iccl = (p clock / transfer rate) * (L / (L + H)) 208{
206 * where L and H are the SCL low/high ratio (5/4 in this case). 209 /*
207 * We also round off the result. 210 * Conditional expression:
211 * ICCH >= COUNT_CLK * (tHIGH + tf)
212 *
213 * SH-Mobile IIC hardware is aware of SCL transition period 'tr',
214 * and can ignore it. SH-Mobile IIC controller starts counting
215 * the HIGH period of the SCL signal (tHIGH) after the SCL input
216 * voltage increases at VIH.
217 *
218 * Afterward it turned out calculating ICCH using only tHIGH spec
219 * will result in violation of the tHD;STA timing spec. We need
220 * to take into account the fall time of SDA signal (tf) at START
221 * condition, in order to meet both tHIGH and tHD;STA specs.
208 */ 222 */
209 num = i2c_clk * 5; 223 return (((count_khz * (tHIGH + tf)) + 5000) / 10000) + offset;
210 denom = pd->bus_speed * 9; 224}
211 tmp = num * 10 / denom;
212 if (tmp % 10 >= 5)
213 pd->iccl = (u_int8_t)((num/denom) + 1);
214 else
215 pd->iccl = (u_int8_t)(num/denom);
216 225
217 /* one more bit of ICCL in ICIC */ 226static void sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd)
218 if (pd->flags & IIC_FLAG_HAS_ICIC67) { 227{
219 if ((num/denom) > 0xff) 228 unsigned long i2c_clk_khz;
220 pd->icic |= ICIC_ICCLB8; 229 u32 tHIGH, tLOW, tf;
221 else 230 int offset;
222 pd->icic &= ~ICIC_ICCLB8; 231
232 /* Get clock rate after clock is enabled */
233 clk_enable(pd->clk);
234 i2c_clk_khz = clk_get_rate(pd->clk) / 1000;
235 i2c_clk_khz /= pd->clks_per_count;
236
237 if (pd->bus_speed == STANDARD_MODE) {
238 tLOW = 47; /* tLOW = 4.7 us */
239 tHIGH = 40; /* tHD;STA = tHIGH = 4.0 us */
240 tf = 3; /* tf = 0.3 us */
241 offset = 0; /* No offset */
242 } else if (pd->bus_speed == FAST_MODE) {
243 tLOW = 13; /* tLOW = 1.3 us */
244 tHIGH = 6; /* tHD;STA = tHIGH = 0.6 us */
245 tf = 3; /* tf = 0.3 us */
246 offset = 0; /* No offset */
247 } else {
248 dev_err(pd->dev, "unrecognized bus speed %lu Hz\n",
249 pd->bus_speed);
250 goto out;
223 } 251 }
224 252
225 /* Calculate the value for icch. From the data sheet: 253 pd->iccl = sh_mobile_i2c_iccl(i2c_clk_khz, tLOW, tf, offset);
226 icch = (p clock / transfer rate) * (H / (L + H)) */ 254 /* one more bit of ICCL in ICIC */
227 num = i2c_clk * 4; 255 if ((pd->iccl > 0xff) && (pd->flags & IIC_FLAG_HAS_ICIC67))
228 tmp = num * 10 / denom; 256 pd->icic |= ICIC_ICCLB8;
229 if (tmp % 10 >= 5)
230 pd->icch = (u_int8_t)((num/denom) + 1);
231 else 257 else
232 pd->icch = (u_int8_t)(num/denom); 258 pd->icic &= ~ICIC_ICCLB8;
233 259
260 pd->icch = sh_mobile_i2c_icch(i2c_clk_khz, tHIGH, tf, offset);
234 /* one more bit of ICCH in ICIC */ 261 /* one more bit of ICCH in ICIC */
235 if (pd->flags & IIC_FLAG_HAS_ICIC67) { 262 if ((pd->icch > 0xff) && (pd->flags & IIC_FLAG_HAS_ICIC67))
236 if ((num/denom) > 0xff) 263 pd->icic |= ICIC_ICCHB8;
237 pd->icic |= ICIC_ICCHB8; 264 else
238 else 265 pd->icic &= ~ICIC_ICCHB8;
239 pd->icic &= ~ICIC_ICCHB8; 266
240 } 267out:
268 clk_disable(pd->clk);
269}
270
271static void activate_ch(struct sh_mobile_i2c_data *pd)
272{
273 /* Wake up device and enable clock */
274 pm_runtime_get_sync(pd->dev);
275 clk_enable(pd->clk);
241 276
242 /* Enable channel and configure rx ack */ 277 /* Enable channel and configure rx ack */
243 iic_set_clr(pd, ICCR, ICCR_ICE, 0); 278 iic_set_clr(pd, ICCR, ICCR_ICE, 0);
@@ -246,8 +281,8 @@ static void activate_ch(struct sh_mobile_i2c_data *pd)
246 iic_wr(pd, ICIC, 0); 281 iic_wr(pd, ICIC, 0);
247 282
248 /* Set the clock */ 283 /* Set the clock */
249 iic_wr(pd, ICCL, pd->iccl); 284 iic_wr(pd, ICCL, pd->iccl & 0xff);
250 iic_wr(pd, ICCH, pd->icch); 285 iic_wr(pd, ICCH, pd->icch & 0xff);
251} 286}
252 287
253static void deactivate_ch(struct sh_mobile_i2c_data *pd) 288static void deactivate_ch(struct sh_mobile_i2c_data *pd)
@@ -434,6 +469,9 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
434 wake_up(&pd->wait); 469 wake_up(&pd->wait);
435 } 470 }
436 471
472 /* defeat write posting to avoid spurious WAIT interrupts */
473 iic_rd(pd, ICSR);
474
437 return IRQ_HANDLED; 475 return IRQ_HANDLED;
438} 476}
439 477
@@ -451,8 +489,8 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg)
451 iic_set_clr(pd, ICCR, ICCR_ICE, 0); 489 iic_set_clr(pd, ICCR, ICCR_ICE, 0);
452 490
453 /* Set the clock */ 491 /* Set the clock */
454 iic_wr(pd, ICCL, pd->iccl); 492 iic_wr(pd, ICCL, pd->iccl & 0xff);
455 iic_wr(pd, ICCH, pd->icch); 493 iic_wr(pd, ICCH, pd->icch & 0xff);
456 494
457 pd->msg = usr_msg; 495 pd->msg = usr_msg;
458 pd->pos = -1; 496 pd->pos = -1;
@@ -621,10 +659,13 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
621 goto err_irq; 659 goto err_irq;
622 } 660 }
623 661
624 /* Use platformd data bus speed or NORMAL_SPEED */ 662 /* Use platform data bus speed or STANDARD_MODE */
625 pd->bus_speed = NORMAL_SPEED; 663 pd->bus_speed = STANDARD_MODE;
626 if (pdata && pdata->bus_speed) 664 if (pdata && pdata->bus_speed)
627 pd->bus_speed = pdata->bus_speed; 665 pd->bus_speed = pdata->bus_speed;
666 pd->clks_per_count = 1;
667 if (pdata && pdata->clks_per_count)
668 pd->clks_per_count = pdata->clks_per_count;
628 669
629 /* The IIC blocks on SH-Mobile ARM processors 670 /* The IIC blocks on SH-Mobile ARM processors
630 * come with two new bits in ICIC. 671 * come with two new bits in ICIC.
@@ -632,6 +673,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
632 if (size > 0x17) 673 if (size > 0x17)
633 pd->flags |= IIC_FLAG_HAS_ICIC67; 674 pd->flags |= IIC_FLAG_HAS_ICIC67;
634 675
676 sh_mobile_i2c_init(pd);
677
635 /* Enable Runtime PM for this device. 678 /* Enable Runtime PM for this device.
636 * 679 *
637 * Also tell the Runtime PM core to ignore children 680 * Also tell the Runtime PM core to ignore children
@@ -667,8 +710,9 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
667 goto err_all; 710 goto err_all;
668 } 711 }
669 712
670 dev_info(&dev->dev, "I2C adapter %d with bus speed %lu Hz\n", 713 dev_info(&dev->dev,
671 adap->nr, pd->bus_speed); 714 "I2C adapter %d with bus speed %lu Hz (L/H=%x/%x)\n",
715 adap->nr, pd->bus_speed, pd->iccl, pd->icch);
672 716
673 of_i2c_register_devices(adap); 717 of_i2c_register_devices(adap);
674 return 0; 718 return 0;
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 566a6757a33d..3b7bc06fe8a6 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -16,6 +16,8 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/of_i2c.h>
20#include <linux/of_gpio.h>
19 21
20struct gpiomux { 22struct gpiomux {
21 struct i2c_adapter *parent; 23 struct i2c_adapter *parent;
@@ -57,29 +59,110 @@ static int __devinit match_gpio_chip_by_label(struct gpio_chip *chip,
57 return !strcmp(chip->label, data); 59 return !strcmp(chip->label, data);
58} 60}
59 61
62#ifdef CONFIG_OF
63static int __devinit i2c_mux_gpio_probe_dt(struct gpiomux *mux,
64 struct platform_device *pdev)
65{
66 struct device_node *np = pdev->dev.of_node;
67 struct device_node *adapter_np, *child;
68 struct i2c_adapter *adapter;
69 unsigned *values, *gpios;
70 int i = 0;
71
72 if (!np)
73 return -ENODEV;
74
75 adapter_np = of_parse_phandle(np, "i2c-parent", 0);
76 if (!adapter_np) {
77 dev_err(&pdev->dev, "Cannot parse i2c-parent\n");
78 return -ENODEV;
79 }
80 adapter = of_find_i2c_adapter_by_node(adapter_np);
81 if (!adapter) {
82 dev_err(&pdev->dev, "Cannot find parent bus\n");
83 return -ENODEV;
84 }
85 mux->data.parent = i2c_adapter_id(adapter);
86 put_device(&adapter->dev);
87
88 mux->data.n_values = of_get_child_count(np);
89
90 values = devm_kzalloc(&pdev->dev,
91 sizeof(*mux->data.values) * mux->data.n_values,
92 GFP_KERNEL);
93 if (!values) {
94 dev_err(&pdev->dev, "Cannot allocate values array");
95 return -ENOMEM;
96 }
97
98 for_each_child_of_node(np, child) {
99 of_property_read_u32(child, "reg", values + i);
100 i++;
101 }
102 mux->data.values = values;
103
104 if (of_property_read_u32(np, "idle-state", &mux->data.idle))
105 mux->data.idle = I2C_MUX_GPIO_NO_IDLE;
106
107 mux->data.n_gpios = of_gpio_named_count(np, "mux-gpios");
108 if (mux->data.n_gpios < 0) {
109 dev_err(&pdev->dev, "Missing mux-gpios property in the DT.\n");
110 return -EINVAL;
111 }
112
113 gpios = devm_kzalloc(&pdev->dev,
114 sizeof(*mux->data.gpios) * mux->data.n_gpios, GFP_KERNEL);
115 if (!gpios) {
116 dev_err(&pdev->dev, "Cannot allocate gpios array");
117 return -ENOMEM;
118 }
119
120 for (i = 0; i < mux->data.n_gpios; i++)
121 gpios[i] = of_get_named_gpio(np, "mux-gpios", i);
122
123 mux->data.gpios = gpios;
124
125 return 0;
126}
127#else
128static int __devinit i2c_mux_gpio_probe_dt(struct gpiomux *mux,
129 struct platform_device *pdev)
130{
131 return 0;
132}
133#endif
134
60static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev) 135static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
61{ 136{
62 struct gpiomux *mux; 137 struct gpiomux *mux;
63 struct i2c_mux_gpio_platform_data *pdata;
64 struct i2c_adapter *parent; 138 struct i2c_adapter *parent;
65 int (*deselect) (struct i2c_adapter *, void *, u32); 139 int (*deselect) (struct i2c_adapter *, void *, u32);
66 unsigned initial_state, gpio_base; 140 unsigned initial_state, gpio_base;
67 int i, ret; 141 int i, ret;
68 142
69 pdata = pdev->dev.platform_data; 143 mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
70 if (!pdata) { 144 if (!mux) {
71 dev_err(&pdev->dev, "Missing platform data\n"); 145 dev_err(&pdev->dev, "Cannot allocate gpiomux structure");
72 return -ENODEV; 146 return -ENOMEM;
73 } 147 }
74 148
149 platform_set_drvdata(pdev, mux);
150
151 if (!pdev->dev.platform_data) {
152 ret = i2c_mux_gpio_probe_dt(mux, pdev);
153 if (ret < 0)
154 return ret;
155 } else
156 memcpy(&mux->data, pdev->dev.platform_data, sizeof(mux->data));
157
75 /* 158 /*
76 * If a GPIO chip name is provided, the GPIO pin numbers provided are 159 * If a GPIO chip name is provided, the GPIO pin numbers provided are
77 * relative to its base GPIO number. Otherwise they are absolute. 160 * relative to its base GPIO number. Otherwise they are absolute.
78 */ 161 */
79 if (pdata->gpio_chip) { 162 if (mux->data.gpio_chip) {
80 struct gpio_chip *gpio; 163 struct gpio_chip *gpio;
81 164
82 gpio = gpiochip_find(pdata->gpio_chip, 165 gpio = gpiochip_find(mux->data.gpio_chip,
83 match_gpio_chip_by_label); 166 match_gpio_chip_by_label);
84 if (!gpio) 167 if (!gpio)
85 return -EPROBE_DEFER; 168 return -EPROBE_DEFER;
@@ -89,49 +172,44 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
89 gpio_base = 0; 172 gpio_base = 0;
90 } 173 }
91 174
92 parent = i2c_get_adapter(pdata->parent); 175 parent = i2c_get_adapter(mux->data.parent);
93 if (!parent) { 176 if (!parent) {
94 dev_err(&pdev->dev, "Parent adapter (%d) not found\n", 177 dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
95 pdata->parent); 178 mux->data.parent);
96 return -ENODEV; 179 return -ENODEV;
97 } 180 }
98 181
99 mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
100 if (!mux) {
101 ret = -ENOMEM;
102 goto alloc_failed;
103 }
104
105 mux->parent = parent; 182 mux->parent = parent;
106 mux->data = *pdata;
107 mux->gpio_base = gpio_base; 183 mux->gpio_base = gpio_base;
184
108 mux->adap = devm_kzalloc(&pdev->dev, 185 mux->adap = devm_kzalloc(&pdev->dev,
109 sizeof(*mux->adap) * pdata->n_values, 186 sizeof(*mux->adap) * mux->data.n_values,
110 GFP_KERNEL); 187 GFP_KERNEL);
111 if (!mux->adap) { 188 if (!mux->adap) {
189 dev_err(&pdev->dev, "Cannot allocate i2c_adapter structure");
112 ret = -ENOMEM; 190 ret = -ENOMEM;
113 goto alloc_failed; 191 goto alloc_failed;
114 } 192 }
115 193
116 if (pdata->idle != I2C_MUX_GPIO_NO_IDLE) { 194 if (mux->data.idle != I2C_MUX_GPIO_NO_IDLE) {
117 initial_state = pdata->idle; 195 initial_state = mux->data.idle;
118 deselect = i2c_mux_gpio_deselect; 196 deselect = i2c_mux_gpio_deselect;
119 } else { 197 } else {
120 initial_state = pdata->values[0]; 198 initial_state = mux->data.values[0];
121 deselect = NULL; 199 deselect = NULL;
122 } 200 }
123 201
124 for (i = 0; i < pdata->n_gpios; i++) { 202 for (i = 0; i < mux->data.n_gpios; i++) {
125 ret = gpio_request(gpio_base + pdata->gpios[i], "i2c-mux-gpio"); 203 ret = gpio_request(gpio_base + mux->data.gpios[i], "i2c-mux-gpio");
126 if (ret) 204 if (ret)
127 goto err_request_gpio; 205 goto err_request_gpio;
128 gpio_direction_output(gpio_base + pdata->gpios[i], 206 gpio_direction_output(gpio_base + mux->data.gpios[i],
129 initial_state & (1 << i)); 207 initial_state & (1 << i));
130 } 208 }
131 209
132 for (i = 0; i < pdata->n_values; i++) { 210 for (i = 0; i < mux->data.n_values; i++) {
133 u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0; 211 u32 nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0;
134 unsigned int class = pdata->classes ? pdata->classes[i] : 0; 212 unsigned int class = mux->data.classes ? mux->data.classes[i] : 0;
135 213
136 mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr, 214 mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr,
137 i, class, 215 i, class,
@@ -144,19 +222,17 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
144 } 222 }
145 223
146 dev_info(&pdev->dev, "%d port mux on %s adapter\n", 224 dev_info(&pdev->dev, "%d port mux on %s adapter\n",
147 pdata->n_values, parent->name); 225 mux->data.n_values, parent->name);
148
149 platform_set_drvdata(pdev, mux);
150 226
151 return 0; 227 return 0;
152 228
153add_adapter_failed: 229add_adapter_failed:
154 for (; i > 0; i--) 230 for (; i > 0; i--)
155 i2c_del_mux_adapter(mux->adap[i - 1]); 231 i2c_del_mux_adapter(mux->adap[i - 1]);
156 i = pdata->n_gpios; 232 i = mux->data.n_gpios;
157err_request_gpio: 233err_request_gpio:
158 for (; i > 0; i--) 234 for (; i > 0; i--)
159 gpio_free(gpio_base + pdata->gpios[i - 1]); 235 gpio_free(gpio_base + mux->data.gpios[i - 1]);
160alloc_failed: 236alloc_failed:
161 i2c_put_adapter(parent); 237 i2c_put_adapter(parent);
162 238
@@ -180,12 +256,19 @@ static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev)
180 return 0; 256 return 0;
181} 257}
182 258
259static const struct of_device_id i2c_mux_gpio_of_match[] __devinitconst = {
260 { .compatible = "i2c-mux-gpio", },
261 {},
262};
263MODULE_DEVICE_TABLE(of, i2c_mux_gpio_of_match);
264
183static struct platform_driver i2c_mux_gpio_driver = { 265static struct platform_driver i2c_mux_gpio_driver = {
184 .probe = i2c_mux_gpio_probe, 266 .probe = i2c_mux_gpio_probe,
185 .remove = __devexit_p(i2c_mux_gpio_remove), 267 .remove = __devexit_p(i2c_mux_gpio_remove),
186 .driver = { 268 .driver = {
187 .owner = THIS_MODULE, 269 .owner = THIS_MODULE,
188 .name = "i2c-mux-gpio", 270 .name = "i2c-mux-gpio",
271 .of_match_table = of_match_ptr(i2c_mux_gpio_of_match),
189 }, 272 },
190}; 273};
191 274
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index d6cc77a53c7e..5f306f79da0c 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -921,6 +921,7 @@ static int __init i8042_platform_init(void)
921 int retval; 921 int retval;
922 922
923#ifdef CONFIG_X86 923#ifdef CONFIG_X86
924 u8 a20_on = 0xdf;
924 /* Just return if pre-detection shows no i8042 controller exist */ 925 /* Just return if pre-detection shows no i8042 controller exist */
925 if (!x86_platform.i8042_detect()) 926 if (!x86_platform.i8042_detect())
926 return -ENODEV; 927 return -ENODEV;
@@ -960,6 +961,14 @@ static int __init i8042_platform_init(void)
960 961
961 if (dmi_check_system(i8042_dmi_dritek_table)) 962 if (dmi_check_system(i8042_dmi_dritek_table))
962 i8042_dritek = true; 963 i8042_dritek = true;
964
965 /*
966 * A20 was already enabled during early kernel init. But some buggy
967 * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
968 * resume from S3. So we do it here and hope that nothing breaks.
969 */
970 i8042_command(&a20_on, 0x10d1);
971 i8042_command(NULL, 0x00ff); /* Null command for SMM firmware */
963#endif /* CONFIG_X86 */ 972#endif /* CONFIG_X86 */
964 973
965 return retval; 974 return retval;
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 28c99c623bcd..22b720ec80cb 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -1217,8 +1217,7 @@ static void __exit dsp_cleanup(void)
1217{ 1217{
1218 mISDN_unregister_Bprotocol(&DSP); 1218 mISDN_unregister_Bprotocol(&DSP);
1219 1219
1220 if (timer_pending(&dsp_spl_tl)) 1220 del_timer_sync(&dsp_spl_tl);
1221 del_timer(&dsp_spl_tl);
1222 1221
1223 if (!list_empty(&dsp_ilist)) { 1222 if (!list_empty(&dsp_ilist)) {
1224 printk(KERN_ERR "mISDN_dsp: Audio DSP object inst list not " 1223 printk(KERN_ERR "mISDN_dsp: Audio DSP object inst list not "
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 0c3ced70707b..164afa71bba7 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -792,6 +792,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
792 * than an unsolicited DID_ABORT. 792 * than an unsolicited DID_ABORT.
793 */ 793 */
794 sc->result = DID_RESET << 16; 794 sc->result = DID_RESET << 16;
795 break;
795 796
796 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ 797 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
797 if (ioc->bus_type == FC) 798 if (ioc->bus_type == FC)
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index 945393129952..7c057a05adb6 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -26,19 +26,16 @@
26#include <linux/mtd/mtd.h> 26#include <linux/mtd/mtd.h>
27#include <linux/mtd/partitions.h> 27#include <linux/mtd/partitions.h>
28#include <linux/bootmem.h> 28#include <linux/bootmem.h>
29#include <linux/magic.h>
30#include <linux/module.h> 29#include <linux/module.h>
31 30
31#include <uapi/linux/magic.h>
32
32#define AR7_PARTS 4 33#define AR7_PARTS 4
33#define ROOT_OFFSET 0xe0000 34#define ROOT_OFFSET 0xe0000
34 35
35#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42) 36#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42)
36#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281) 37#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281)
37 38
38#ifndef SQUASHFS_MAGIC
39#define SQUASHFS_MAGIC 0x73717368
40#endif
41
42struct ar7_bin_rec { 39struct ar7_bin_rec {
43 unsigned int checksum; 40 unsigned int checksum;
44 unsigned int length; 41 unsigned int length;
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index 63d2a64331f7..6eeb84c81bc2 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -37,8 +37,7 @@
37 37
38#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */ 38#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
39 39
40#define BCM63XX_MIN_CFE_SIZE 0x10000 /* always at least 64KiB */ 40#define BCM63XX_CFE_BLOCK_SIZE 0x10000 /* always at least 64KiB */
41#define BCM63XX_MIN_NVRAM_SIZE 0x10000 /* always at least 64KiB */
42 41
43#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0 42#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
44 43
@@ -79,7 +78,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
79 unsigned int rootfsaddr, kerneladdr, spareaddr; 78 unsigned int rootfsaddr, kerneladdr, spareaddr;
80 unsigned int rootfslen, kernellen, sparelen, totallen; 79 unsigned int rootfslen, kernellen, sparelen, totallen;
81 unsigned int cfelen, nvramlen; 80 unsigned int cfelen, nvramlen;
82 int namelen = 0; 81 unsigned int cfe_erasesize;
83 int i; 82 int i;
84 u32 computed_crc; 83 u32 computed_crc;
85 bool rootfs_first = false; 84 bool rootfs_first = false;
@@ -87,8 +86,11 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
87 if (bcm63xx_detect_cfe(master)) 86 if (bcm63xx_detect_cfe(master))
88 return -EINVAL; 87 return -EINVAL;
89 88
90 cfelen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_CFE_SIZE); 89 cfe_erasesize = max_t(uint32_t, master->erasesize,
91 nvramlen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_NVRAM_SIZE); 90 BCM63XX_CFE_BLOCK_SIZE);
91
92 cfelen = cfe_erasesize;
93 nvramlen = cfe_erasesize;
92 94
93 /* Allocate memory for buffer */ 95 /* Allocate memory for buffer */
94 buf = vmalloc(sizeof(struct bcm_tag)); 96 buf = vmalloc(sizeof(struct bcm_tag));
@@ -121,7 +123,6 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
121 kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE; 123 kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
122 rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE; 124 rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
123 spareaddr = roundup(totallen, master->erasesize) + cfelen; 125 spareaddr = roundup(totallen, master->erasesize) + cfelen;
124 sparelen = master->size - spareaddr - nvramlen;
125 126
126 if (rootfsaddr < kerneladdr) { 127 if (rootfsaddr < kerneladdr) {
127 /* default Broadcom layout */ 128 /* default Broadcom layout */
@@ -139,19 +140,15 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
139 rootfslen = 0; 140 rootfslen = 0;
140 rootfsaddr = 0; 141 rootfsaddr = 0;
141 spareaddr = cfelen; 142 spareaddr = cfelen;
142 sparelen = master->size - cfelen - nvramlen;
143 } 143 }
144 sparelen = master->size - spareaddr - nvramlen;
144 145
145 /* Determine number of partitions */ 146 /* Determine number of partitions */
146 namelen = 8; 147 if (rootfslen > 0)
147 if (rootfslen > 0) {
148 nrparts++; 148 nrparts++;
149 namelen += 6; 149
150 } 150 if (kernellen > 0)
151 if (kernellen > 0) {
152 nrparts++; 151 nrparts++;
153 namelen += 6;
154 }
155 152
156 /* Ask kernel for more memory */ 153 /* Ask kernel for more memory */
157 parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL); 154 parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
@@ -193,17 +190,16 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
193 parts[curpart].name = "nvram"; 190 parts[curpart].name = "nvram";
194 parts[curpart].offset = master->size - nvramlen; 191 parts[curpart].offset = master->size - nvramlen;
195 parts[curpart].size = nvramlen; 192 parts[curpart].size = nvramlen;
193 curpart++;
196 194
197 /* Global partition "linux" to make easy firmware upgrade */ 195 /* Global partition "linux" to make easy firmware upgrade */
198 curpart++;
199 parts[curpart].name = "linux"; 196 parts[curpart].name = "linux";
200 parts[curpart].offset = cfelen; 197 parts[curpart].offset = cfelen;
201 parts[curpart].size = master->size - cfelen - nvramlen; 198 parts[curpart].size = master->size - cfelen - nvramlen;
202 199
203 for (i = 0; i < nrparts; i++) 200 for (i = 0; i < nrparts; i++)
204 pr_info("Partition %d is %s offset %lx and length %lx\n", i, 201 pr_info("Partition %d is %s offset %llx and length %llx\n", i,
205 parts[i].name, (long unsigned int)(parts[i].offset), 202 parts[i].name, parts[i].offset, parts[i].size);
206 (long unsigned int)(parts[i].size));
207 203
208 pr_info("Spare partition is offset %x and length %x\n", spareaddr, 204 pr_info("Spare partition is offset %x and length %x\n", spareaddr,
209 sparelen); 205 sparelen);
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 5ff5c4a16943..b86197286f24 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1536,8 +1536,20 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1536 UDELAY(map, chip, adr, 1); 1536 UDELAY(map, chip, adr, 1);
1537 } 1537 }
1538 1538
1539 /* reset on all failures. */ 1539 /*
1540 map_write( map, CMD(0xF0), chip->start ); 1540 * Recovery from write-buffer programming failures requires
1541 * the write-to-buffer-reset sequence. Since the last part
1542 * of the sequence also works as a normal reset, we can run
1543 * the same commands regardless of why we are here.
1544 * See e.g.
1545 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
1546 */
1547 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1548 cfi->device_type, NULL);
1549 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1550 cfi->device_type, NULL);
1551 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
1552 cfi->device_type, NULL);
1541 xip_enable(map, chip, adr); 1553 xip_enable(map, chip, adr);
1542 /* FIXME - should have reset delay before continuing */ 1554 /* FIXME - should have reset delay before continuing */
1543 1555
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index aed1b8a63c9f..c533f27d863f 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -56,8 +56,8 @@
56 56
57 57
58/* special size referring to all the remaining space in a partition */ 58/* special size referring to all the remaining space in a partition */
59#define SIZE_REMAINING UINT_MAX 59#define SIZE_REMAINING ULLONG_MAX
60#define OFFSET_CONTINUOUS UINT_MAX 60#define OFFSET_CONTINUOUS ULLONG_MAX
61 61
62struct cmdline_mtd_partition { 62struct cmdline_mtd_partition {
63 struct cmdline_mtd_partition *next; 63 struct cmdline_mtd_partition *next;
@@ -89,7 +89,7 @@ static struct mtd_partition * newpart(char *s,
89 int extra_mem_size) 89 int extra_mem_size)
90{ 90{
91 struct mtd_partition *parts; 91 struct mtd_partition *parts;
92 unsigned long size, offset = OFFSET_CONTINUOUS; 92 unsigned long long size, offset = OFFSET_CONTINUOUS;
93 char *name; 93 char *name;
94 int name_len; 94 int name_len;
95 unsigned char *extra_mem; 95 unsigned char *extra_mem;
@@ -104,7 +104,8 @@ static struct mtd_partition * newpart(char *s,
104 } else { 104 } else {
105 size = memparse(s, &s); 105 size = memparse(s, &s);
106 if (size < PAGE_SIZE) { 106 if (size < PAGE_SIZE) {
107 printk(KERN_ERR ERRP "partition size too small (%lx)\n", size); 107 printk(KERN_ERR ERRP "partition size too small (%llx)\n",
108 size);
108 return ERR_PTR(-EINVAL); 109 return ERR_PTR(-EINVAL);
109 } 110 }
110 } 111 }
@@ -296,7 +297,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
296 struct mtd_partition **pparts, 297 struct mtd_partition **pparts,
297 struct mtd_part_parser_data *data) 298 struct mtd_part_parser_data *data)
298{ 299{
299 unsigned long offset; 300 unsigned long long offset;
300 int i, err; 301 int i, err;
301 struct cmdline_mtd_partition *part; 302 struct cmdline_mtd_partition *part;
302 const char *mtd_id = master->name; 303 const char *mtd_id = master->name;
@@ -308,48 +309,52 @@ static int parse_cmdline_partitions(struct mtd_info *master,
308 return err; 309 return err;
309 } 310 }
310 311
312 /*
313 * Search for the partition definition matching master->name.
314 * If master->name is not set, stop at first partition definition.
315 */
311 for (part = partitions; part; part = part->next) { 316 for (part = partitions; part; part = part->next) {
312 if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) { 317 if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id)))
313 for (i = 0, offset = 0; i < part->num_parts; i++) { 318 break;
314 if (part->parts[i].offset == OFFSET_CONTINUOUS) 319 }
315 part->parts[i].offset = offset; 320
316 else 321 if (!part)
317 offset = part->parts[i].offset; 322 return 0;
318 323
319 if (part->parts[i].size == SIZE_REMAINING) 324 for (i = 0, offset = 0; i < part->num_parts; i++) {
320 part->parts[i].size = master->size - offset; 325 if (part->parts[i].offset == OFFSET_CONTINUOUS)
321 326 part->parts[i].offset = offset;
322 if (part->parts[i].size == 0) { 327 else
323 printk(KERN_WARNING ERRP 328 offset = part->parts[i].offset;
324 "%s: skipping zero sized partition\n", 329
325 part->mtd_id); 330 if (part->parts[i].size == SIZE_REMAINING)
326 part->num_parts--; 331 part->parts[i].size = master->size - offset;
327 memmove(&part->parts[i], 332
328 &part->parts[i + 1], 333 if (part->parts[i].size == 0) {
329 sizeof(*part->parts) * (part->num_parts - i)); 334 printk(KERN_WARNING ERRP
330 continue; 335 "%s: skipping zero sized partition\n",
331 } 336 part->mtd_id);
332 337 part->num_parts--;
333 if (offset + part->parts[i].size > master->size) { 338 memmove(&part->parts[i], &part->parts[i + 1],
334 printk(KERN_WARNING ERRP 339 sizeof(*part->parts) * (part->num_parts - i));
335 "%s: partitioning exceeds flash size, truncating\n", 340 continue;
336 part->mtd_id);
337 part->parts[i].size = master->size - offset;
338 }
339 offset += part->parts[i].size;
340 }
341
342 *pparts = kmemdup(part->parts,
343 sizeof(*part->parts) * part->num_parts,
344 GFP_KERNEL);
345 if (!*pparts)
346 return -ENOMEM;
347
348 return part->num_parts;
349 } 341 }
342
343 if (offset + part->parts[i].size > master->size) {
344 printk(KERN_WARNING ERRP
345 "%s: partitioning exceeds flash size, truncating\n",
346 part->mtd_id);
347 part->parts[i].size = master->size - offset;
348 }
349 offset += part->parts[i].size;
350 } 350 }
351 351
352 return 0; 352 *pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
353 GFP_KERNEL);
354 if (!*pparts)
355 return -ENOMEM;
356
357 return part->num_parts;
353} 358}
354 359
355 360
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 2dc5a6f3fd57..4714584aa993 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -66,7 +66,7 @@ out:
66 return err; 66 return err;
67} 67}
68 68
69static int __devexit bcm47xxsflash_remove(struct platform_device *pdev) 69static int bcm47xxsflash_remove(struct platform_device *pdev)
70{ 70{
71 struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev); 71 struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
72 72
@@ -77,7 +77,7 @@ static int __devexit bcm47xxsflash_remove(struct platform_device *pdev)
77} 77}
78 78
79static struct platform_driver bcma_sflash_driver = { 79static struct platform_driver bcma_sflash_driver = {
80 .remove = __devexit_p(bcm47xxsflash_remove), 80 .remove = bcm47xxsflash_remove,
81 .driver = { 81 .driver = {
82 .name = "bcma_sflash", 82 .name = "bcma_sflash",
83 .owner = THIS_MODULE, 83 .owner = THIS_MODULE,
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 681e2ee0f2d6..e081bfeaaf7d 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -62,6 +62,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
62 memset(page_address(page), 0xff, PAGE_SIZE); 62 memset(page_address(page), 0xff, PAGE_SIZE);
63 set_page_dirty(page); 63 set_page_dirty(page);
64 unlock_page(page); 64 unlock_page(page);
65 balance_dirty_pages_ratelimited(mapping);
65 break; 66 break;
66 } 67 }
67 68
@@ -152,6 +153,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
152 memcpy(page_address(page) + offset, buf, cpylen); 153 memcpy(page_address(page) + offset, buf, cpylen);
153 set_page_dirty(page); 154 set_page_dirty(page);
154 unlock_page(page); 155 unlock_page(page);
156 balance_dirty_pages_ratelimited(mapping);
155 } 157 }
156 page_cache_release(page); 158 page_cache_release(page);
157 159
@@ -433,7 +435,7 @@ static int __init block2mtd_init(void)
433} 435}
434 436
435 437
436static void __devexit block2mtd_exit(void) 438static void block2mtd_exit(void)
437{ 439{
438 struct list_head *pos, *next; 440 struct list_head *pos, *next;
439 441
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index d34d83b8f9c2..8510ccb9c6f0 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1440,7 +1440,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
1440 oobdelta = mtd->ecclayout->oobavail; 1440 oobdelta = mtd->ecclayout->oobavail;
1441 break; 1441 break;
1442 default: 1442 default:
1443 oobdelta = 0; 1443 return -EINVAL;
1444 } 1444 }
1445 if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) || 1445 if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) ||
1446 (ofs % DOC_LAYOUT_PAGE_SIZE)) 1446 (ofs % DOC_LAYOUT_PAGE_SIZE))
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index 706b847b46b3..88b3fd3e18a7 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -70,8 +70,6 @@ static unsigned long __initdata doc_locations[] = {
70 0xe0000, 0xe2000, 0xe4000, 0xe6000, 70 0xe0000, 0xe2000, 0xe4000, 0xe6000,
71 0xe8000, 0xea000, 0xec000, 0xee000, 71 0xe8000, 0xea000, 0xec000, 0xee000,
72#endif /* CONFIG_MTD_DOCPROBE_HIGH */ 72#endif /* CONFIG_MTD_DOCPROBE_HIGH */
73#else
74#warning Unknown architecture for DiskOnChip. No default probe locations defined
75#endif 73#endif
76 0xffffffff }; 74 0xffffffff };
77 75
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 03838bab1f59..4eeeb2d7f6ea 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -73,14 +73,6 @@
73#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */ 73#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
74#define MAX_CMD_SIZE 5 74#define MAX_CMD_SIZE 5
75 75
76#ifdef CONFIG_M25PXX_USE_FAST_READ
77#define OPCODE_READ OPCODE_FAST_READ
78#define FAST_READ_DUMMY_BYTE 1
79#else
80#define OPCODE_READ OPCODE_NORM_READ
81#define FAST_READ_DUMMY_BYTE 0
82#endif
83
84#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16) 76#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
85 77
86/****************************************************************************/ 78/****************************************************************************/
@@ -93,6 +85,7 @@ struct m25p {
93 u16 addr_width; 85 u16 addr_width;
94 u8 erase_opcode; 86 u8 erase_opcode;
95 u8 *command; 87 u8 *command;
88 bool fast_read;
96}; 89};
97 90
98static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd) 91static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
@@ -168,6 +161,7 @@ static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
168{ 161{
169 switch (JEDEC_MFR(jedec_id)) { 162 switch (JEDEC_MFR(jedec_id)) {
170 case CFI_MFR_MACRONIX: 163 case CFI_MFR_MACRONIX:
164 case 0xEF /* winbond */:
171 flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B; 165 flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
172 return spi_write(flash->spi, flash->command, 1); 166 return spi_write(flash->spi, flash->command, 1);
173 default: 167 default:
@@ -342,6 +336,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
342 struct m25p *flash = mtd_to_m25p(mtd); 336 struct m25p *flash = mtd_to_m25p(mtd);
343 struct spi_transfer t[2]; 337 struct spi_transfer t[2];
344 struct spi_message m; 338 struct spi_message m;
339 uint8_t opcode;
345 340
346 pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 341 pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
347 __func__, (u32)from, len); 342 __func__, (u32)from, len);
@@ -354,7 +349,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
354 * Should add 1 byte DUMMY_BYTE. 349 * Should add 1 byte DUMMY_BYTE.
355 */ 350 */
356 t[0].tx_buf = flash->command; 351 t[0].tx_buf = flash->command;
357 t[0].len = m25p_cmdsz(flash) + FAST_READ_DUMMY_BYTE; 352 t[0].len = m25p_cmdsz(flash) + (flash->fast_read ? 1 : 0);
358 spi_message_add_tail(&t[0], &m); 353 spi_message_add_tail(&t[0], &m);
359 354
360 t[1].rx_buf = buf; 355 t[1].rx_buf = buf;
@@ -376,12 +371,14 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
376 */ 371 */
377 372
378 /* Set up the write data buffer. */ 373 /* Set up the write data buffer. */
379 flash->command[0] = OPCODE_READ; 374 opcode = flash->fast_read ? OPCODE_FAST_READ : OPCODE_NORM_READ;
375 flash->command[0] = opcode;
380 m25p_addr2cmd(flash, from, flash->command); 376 m25p_addr2cmd(flash, from, flash->command);
381 377
382 spi_sync(flash->spi, &m); 378 spi_sync(flash->spi, &m);
383 379
384 *retlen = m.actual_length - m25p_cmdsz(flash) - FAST_READ_DUMMY_BYTE; 380 *retlen = m.actual_length - m25p_cmdsz(flash) -
381 (flash->fast_read ? 1 : 0);
385 382
386 mutex_unlock(&flash->lock); 383 mutex_unlock(&flash->lock);
387 384
@@ -664,7 +661,8 @@ static const struct spi_device_id m25p_ids[] = {
664 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, 661 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
665 662
666 /* Micron */ 663 /* Micron */
667 { "n25q128", INFO(0x20ba18, 0, 64 * 1024, 256, 0) }, 664 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
665 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
668 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) }, 666 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
669 667
670 /* Spansion -- single (large) sector size only, at least 668 /* Spansion -- single (large) sector size only, at least
@@ -745,6 +743,8 @@ static const struct spi_device_id m25p_ids[] = {
745 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, 743 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
746 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, 744 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
747 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, 745 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
746 { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
747 { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
748 748
749 /* Catalyst / On Semiconductor -- non-JEDEC */ 749 /* Catalyst / On Semiconductor -- non-JEDEC */
750 { "cat25c11", CAT25_INFO( 16, 8, 16, 1) }, 750 { "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
@@ -756,7 +756,7 @@ static const struct spi_device_id m25p_ids[] = {
756}; 756};
757MODULE_DEVICE_TABLE(spi, m25p_ids); 757MODULE_DEVICE_TABLE(spi, m25p_ids);
758 758
759static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi) 759static const struct spi_device_id *jedec_probe(struct spi_device *spi)
760{ 760{
761 int tmp; 761 int tmp;
762 u8 code = OPCODE_RDID; 762 u8 code = OPCODE_RDID;
@@ -801,7 +801,7 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
801 * matches what the READ command supports, at least until this driver 801 * matches what the READ command supports, at least until this driver
802 * understands FAST_READ (for clocks over 25 MHz). 802 * understands FAST_READ (for clocks over 25 MHz).
803 */ 803 */
804static int __devinit m25p_probe(struct spi_device *spi) 804static int m25p_probe(struct spi_device *spi)
805{ 805{
806 const struct spi_device_id *id = spi_get_device_id(spi); 806 const struct spi_device_id *id = spi_get_device_id(spi);
807 struct flash_platform_data *data; 807 struct flash_platform_data *data;
@@ -809,9 +809,10 @@ static int __devinit m25p_probe(struct spi_device *spi)
809 struct flash_info *info; 809 struct flash_info *info;
810 unsigned i; 810 unsigned i;
811 struct mtd_part_parser_data ppdata; 811 struct mtd_part_parser_data ppdata;
812 struct device_node __maybe_unused *np = spi->dev.of_node;
812 813
813#ifdef CONFIG_MTD_OF_PARTS 814#ifdef CONFIG_MTD_OF_PARTS
814 if (!of_device_is_available(spi->dev.of_node)) 815 if (!of_device_is_available(np))
815 return -ENODEV; 816 return -ENODEV;
816#endif 817#endif
817 818
@@ -863,7 +864,8 @@ static int __devinit m25p_probe(struct spi_device *spi)
863 flash = kzalloc(sizeof *flash, GFP_KERNEL); 864 flash = kzalloc(sizeof *flash, GFP_KERNEL);
864 if (!flash) 865 if (!flash)
865 return -ENOMEM; 866 return -ENOMEM;
866 flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL); 867 flash->command = kmalloc(MAX_CMD_SIZE + (flash->fast_read ? 1 : 0),
868 GFP_KERNEL);
867 if (!flash->command) { 869 if (!flash->command) {
868 kfree(flash); 870 kfree(flash);
869 return -ENOMEM; 871 return -ENOMEM;
@@ -920,6 +922,16 @@ static int __devinit m25p_probe(struct spi_device *spi)
920 flash->page_size = info->page_size; 922 flash->page_size = info->page_size;
921 flash->mtd.writebufsize = flash->page_size; 923 flash->mtd.writebufsize = flash->page_size;
922 924
925 flash->fast_read = false;
926#ifdef CONFIG_OF
927 if (np && of_property_read_bool(np, "m25p,fast-read"))
928 flash->fast_read = true;
929#endif
930
931#ifdef CONFIG_M25PXX_USE_FAST_READ
932 flash->fast_read = true;
933#endif
934
923 if (info->addr_width) 935 if (info->addr_width)
924 flash->addr_width = info->addr_width; 936 flash->addr_width = info->addr_width;
925 else { 937 else {
@@ -961,7 +973,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
961} 973}
962 974
963 975
964static int __devexit m25p_remove(struct spi_device *spi) 976static int m25p_remove(struct spi_device *spi)
965{ 977{
966 struct m25p *flash = dev_get_drvdata(&spi->dev); 978 struct m25p *flash = dev_get_drvdata(&spi->dev);
967 int status; 979 int status;
@@ -983,7 +995,7 @@ static struct spi_driver m25p80_driver = {
983 }, 995 },
984 .id_table = m25p_ids, 996 .id_table = m25p_ids,
985 .probe = m25p_probe, 997 .probe = m25p_probe,
986 .remove = __devexit_p(m25p_remove), 998 .remove = m25p_remove,
987 999
988 /* REVISIT: many of these chips have deep power-down modes, which 1000 /* REVISIT: many of these chips have deep power-down modes, which
989 * should clearly be entered on suspend() to minimize power use. 1001 * should clearly be entered on suspend() to minimize power use.
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 928fb0e6d73a..ea7ea7b595d8 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -618,7 +618,7 @@ static char *otp_setup(struct mtd_info *device, char revision)
618/* 618/*
619 * Register DataFlash device with MTD subsystem. 619 * Register DataFlash device with MTD subsystem.
620 */ 620 */
621static int __devinit 621static int
622add_dataflash_otp(struct spi_device *spi, char *name, 622add_dataflash_otp(struct spi_device *spi, char *name,
623 int nr_pages, int pagesize, int pageoffset, char revision) 623 int nr_pages, int pagesize, int pageoffset, char revision)
624{ 624{
@@ -679,7 +679,7 @@ add_dataflash_otp(struct spi_device *spi, char *name,
679 return err; 679 return err;
680} 680}
681 681
682static inline int __devinit 682static inline int
683add_dataflash(struct spi_device *spi, char *name, 683add_dataflash(struct spi_device *spi, char *name,
684 int nr_pages, int pagesize, int pageoffset) 684 int nr_pages, int pagesize, int pageoffset)
685{ 685{
@@ -705,7 +705,7 @@ struct flash_info {
705#define IS_POW2PS 0x0001 /* uses 2^N byte pages */ 705#define IS_POW2PS 0x0001 /* uses 2^N byte pages */
706}; 706};
707 707
708static struct flash_info __devinitdata dataflash_data [] = { 708static struct flash_info dataflash_data[] = {
709 709
710 /* 710 /*
711 * NOTE: chips with SUP_POW2PS (rev D and up) need two entries, 711 * NOTE: chips with SUP_POW2PS (rev D and up) need two entries,
@@ -740,7 +740,7 @@ static struct flash_info __devinitdata dataflash_data [] = {
740 { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS}, 740 { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
741}; 741};
742 742
743static struct flash_info *__devinit jedec_probe(struct spi_device *spi) 743static struct flash_info *jedec_probe(struct spi_device *spi)
744{ 744{
745 int tmp; 745 int tmp;
746 uint8_t code = OP_READ_ID; 746 uint8_t code = OP_READ_ID;
@@ -823,7 +823,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
823 * AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11 823 * AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11
824 * AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11 824 * AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11
825 */ 825 */
826static int __devinit dataflash_probe(struct spi_device *spi) 826static int dataflash_probe(struct spi_device *spi)
827{ 827{
828 int status; 828 int status;
829 struct flash_info *info; 829 struct flash_info *info;
@@ -897,7 +897,7 @@ static int __devinit dataflash_probe(struct spi_device *spi)
897 return status; 897 return status;
898} 898}
899 899
900static int __devexit dataflash_remove(struct spi_device *spi) 900static int dataflash_remove(struct spi_device *spi)
901{ 901{
902 struct dataflash *flash = dev_get_drvdata(&spi->dev); 902 struct dataflash *flash = dev_get_drvdata(&spi->dev);
903 int status; 903 int status;
@@ -920,7 +920,7 @@ static struct spi_driver dataflash_driver = {
920 }, 920 },
921 921
922 .probe = dataflash_probe, 922 .probe = dataflash_probe,
923 .remove = __devexit_p(dataflash_remove), 923 .remove = dataflash_remove,
924 924
925 /* FIXME: investigate suspend and resume... */ 925 /* FIXME: investigate suspend and resume... */
926}; 926};
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index dcc3c9511530..2d2c2a5d4d2a 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -756,7 +756,7 @@ err_probe:
756 756
757 757
758#ifdef CONFIG_OF 758#ifdef CONFIG_OF
759static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev, 759static int spear_smi_probe_config_dt(struct platform_device *pdev,
760 struct device_node *np) 760 struct device_node *np)
761{ 761{
762 struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev); 762 struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
@@ -799,7 +799,7 @@ static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
799 return 0; 799 return 0;
800} 800}
801#else 801#else
802static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev, 802static int spear_smi_probe_config_dt(struct platform_device *pdev,
803 struct device_node *np) 803 struct device_node *np)
804{ 804{
805 return -ENOSYS; 805 return -ENOSYS;
@@ -901,7 +901,7 @@ static int spear_smi_setup_banks(struct platform_device *pdev,
901 * and do proper init for any found one. 901 * and do proper init for any found one.
902 * Returns 0 on success, non zero otherwise 902 * Returns 0 on success, non zero otherwise
903 */ 903 */
904static int __devinit spear_smi_probe(struct platform_device *pdev) 904static int spear_smi_probe(struct platform_device *pdev)
905{ 905{
906 struct device_node *np = pdev->dev.of_node; 906 struct device_node *np = pdev->dev.of_node;
907 struct spear_smi_plat_data *pdata = NULL; 907 struct spear_smi_plat_data *pdata = NULL;
@@ -1016,7 +1016,7 @@ err:
1016 * 1016 *
1017 * free all allocations and delete the partitions. 1017 * free all allocations and delete the partitions.
1018 */ 1018 */
1019static int __devexit spear_smi_remove(struct platform_device *pdev) 1019static int spear_smi_remove(struct platform_device *pdev)
1020{ 1020{
1021 struct spear_smi *dev; 1021 struct spear_smi *dev;
1022 struct spear_snor_flash *flash; 1022 struct spear_snor_flash *flash;
@@ -1092,20 +1092,9 @@ static struct platform_driver spear_smi_driver = {
1092#endif 1092#endif
1093 }, 1093 },
1094 .probe = spear_smi_probe, 1094 .probe = spear_smi_probe,
1095 .remove = __devexit_p(spear_smi_remove), 1095 .remove = spear_smi_remove,
1096}; 1096};
1097 1097module_platform_driver(spear_smi_driver);
1098static int spear_smi_init(void)
1099{
1100 return platform_driver_register(&spear_smi_driver);
1101}
1102module_init(spear_smi_init);
1103
1104static void spear_smi_exit(void)
1105{
1106 platform_driver_unregister(&spear_smi_driver);
1107}
1108module_exit(spear_smi_exit);
1109 1098
1110MODULE_LICENSE("GPL"); 1099MODULE_LICENSE("GPL");
1111MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.hashim@st.com>"); 1100MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.hashim@st.com>");
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index ab8a2f4c8d60..8091b0163694 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -64,7 +64,7 @@ struct flash_info {
64 64
65#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd) 65#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
66 66
67static struct flash_info __devinitdata sst25l_flash_info[] = { 67static struct flash_info sst25l_flash_info[] = {
68 {"sst25lf020a", 0xbf43, 256, 1024, 4096}, 68 {"sst25lf020a", 0xbf43, 256, 1024, 4096},
69 {"sst25lf040a", 0xbf44, 256, 2048, 4096}, 69 {"sst25lf040a", 0xbf44, 256, 2048, 4096},
70}; 70};
@@ -313,7 +313,7 @@ out:
313 return ret; 313 return ret;
314} 314}
315 315
316static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi) 316static struct flash_info *sst25l_match_device(struct spi_device *spi)
317{ 317{
318 struct flash_info *flash_info = NULL; 318 struct flash_info *flash_info = NULL;
319 struct spi_message m; 319 struct spi_message m;
@@ -353,7 +353,7 @@ static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
353 return flash_info; 353 return flash_info;
354} 354}
355 355
356static int __devinit sst25l_probe(struct spi_device *spi) 356static int sst25l_probe(struct spi_device *spi)
357{ 357{
358 struct flash_info *flash_info; 358 struct flash_info *flash_info;
359 struct sst25l_flash *flash; 359 struct sst25l_flash *flash;
@@ -411,7 +411,7 @@ static int __devinit sst25l_probe(struct spi_device *spi)
411 return 0; 411 return 0;
412} 412}
413 413
414static int __devexit sst25l_remove(struct spi_device *spi) 414static int sst25l_remove(struct spi_device *spi)
415{ 415{
416 struct sst25l_flash *flash = dev_get_drvdata(&spi->dev); 416 struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
417 int ret; 417 int ret;
@@ -428,7 +428,7 @@ static struct spi_driver sst25l_driver = {
428 .owner = THIS_MODULE, 428 .owner = THIS_MODULE,
429 }, 429 },
430 .probe = sst25l_probe, 430 .probe = sst25l_probe,
431 .remove = __devexit_p(sst25l_remove), 431 .remove = sst25l_remove,
432}; 432};
433 433
434module_spi_driver(sst25l_driver); 434module_spi_driver(sst25l_driver);
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index df304868bebb..62ba82c396c2 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -358,13 +358,6 @@ config MTD_IXP2000
358 IXP2000 based board and would like to use the flash chips on it, 358 IXP2000 based board and would like to use the flash chips on it,
359 say 'Y'. 359 say 'Y'.
360 360
361config MTD_FORTUNET
362 tristate "CFI Flash device mapped on the FortuNet board"
363 depends on MTD_CFI && SA1100_FORTUNET
364 help
365 This enables access to the Flash on the FortuNet board. If you
366 have such a board, say 'Y'.
367
368config MTD_AUTCPU12 361config MTD_AUTCPU12
369 bool "NV-RAM mapping AUTCPU12 board" 362 bool "NV-RAM mapping AUTCPU12 board"
370 depends on ARCH_AUTCPU12 363 depends on ARCH_AUTCPU12
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index a0240edd1961..4ded28711bc1 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -39,7 +39,6 @@ obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
39obj-$(CONFIG_MTD_PCI) += pci.o 39obj-$(CONFIG_MTD_PCI) += pci.o
40obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o 40obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
41obj-$(CONFIG_MTD_IMPA7) += impa7.o 41obj-$(CONFIG_MTD_IMPA7) += impa7.o
42obj-$(CONFIG_MTD_FORTUNET) += fortunet.o
43obj-$(CONFIG_MTD_UCLINUX) += uclinux.o 42obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
44obj-$(CONFIG_MTD_NETtel) += nettel.o 43obj-$(CONFIG_MTD_NETtel) += nettel.o
45obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o 44obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index e2875d6fe129..f7207b0a76dc 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -100,8 +100,8 @@ static void amd76xrom_cleanup(struct amd76xrom_window *window)
100} 100}
101 101
102 102
103static int __devinit amd76xrom_init_one (struct pci_dev *pdev, 103static int amd76xrom_init_one(struct pci_dev *pdev,
104 const struct pci_device_id *ent) 104 const struct pci_device_id *ent)
105{ 105{
106 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 106 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
107 u8 byte; 107 u8 byte;
@@ -289,7 +289,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
289} 289}
290 290
291 291
292static void __devexit amd76xrom_remove_one (struct pci_dev *pdev) 292static void amd76xrom_remove_one(struct pci_dev *pdev)
293{ 293{
294 struct amd76xrom_window *window = &amd76xrom_window; 294 struct amd76xrom_window *window = &amd76xrom_window;
295 295
@@ -347,4 +347,3 @@ module_exit(cleanup_amd76xrom);
347MODULE_LICENSE("GPL"); 347MODULE_LICENSE("GPL");
348MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>"); 348MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
349MODULE_DESCRIPTION("MTD map driver for BIOS chips on the AMD76X southbridge"); 349MODULE_DESCRIPTION("MTD map driver for BIOS chips on the AMD76X southbridge");
350
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index 76fb594bb1d9..a2dc2ae4b24e 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -33,7 +33,7 @@ struct autcpu12_nvram_priv {
33 struct map_info map; 33 struct map_info map;
34}; 34};
35 35
36static int __devinit autcpu12_nvram_probe(struct platform_device *pdev) 36static int autcpu12_nvram_probe(struct platform_device *pdev)
37{ 37{
38 map_word tmp, save0, save1; 38 map_word tmp, save0, save1;
39 struct resource *res; 39 struct resource *res;
@@ -105,7 +105,7 @@ static int __devinit autcpu12_nvram_probe(struct platform_device *pdev)
105 return -ENOMEM; 105 return -ENOMEM;
106} 106}
107 107
108static int __devexit autcpu12_nvram_remove(struct platform_device *pdev) 108static int autcpu12_nvram_remove(struct platform_device *pdev)
109{ 109{
110 struct autcpu12_nvram_priv *priv = platform_get_drvdata(pdev); 110 struct autcpu12_nvram_priv *priv = platform_get_drvdata(pdev);
111 111
@@ -121,7 +121,7 @@ static struct platform_driver autcpu12_nvram_driver = {
121 .owner = THIS_MODULE, 121 .owner = THIS_MODULE,
122 }, 122 },
123 .probe = autcpu12_nvram_probe, 123 .probe = autcpu12_nvram_probe,
124 .remove = __devexit_p(autcpu12_nvram_remove), 124 .remove = autcpu12_nvram_remove,
125}; 125};
126module_platform_driver(autcpu12_nvram_driver); 126module_platform_driver(autcpu12_nvram_driver);
127 127
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index ef5cde84a8b3..f833edfaab79 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -30,7 +30,8 @@
30#include <linux/io.h> 30#include <linux/io.h>
31#include <asm/unaligned.h> 31#include <asm/unaligned.h>
32 32
33#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); }) 33#define pr_devinit(fmt, args...) \
34 ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
34 35
35#define DRIVER_NAME "bfin-async-flash" 36#define DRIVER_NAME "bfin-async-flash"
36 37
@@ -123,7 +124,7 @@ static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const voi
123 124
124static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; 125static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
125 126
126static int __devinit bfin_flash_probe(struct platform_device *pdev) 127static int bfin_flash_probe(struct platform_device *pdev)
127{ 128{
128 int ret; 129 int ret;
129 struct physmap_flash_data *pdata = pdev->dev.platform_data; 130 struct physmap_flash_data *pdata = pdev->dev.platform_data;
@@ -172,7 +173,7 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
172 return 0; 173 return 0;
173} 174}
174 175
175static int __devexit bfin_flash_remove(struct platform_device *pdev) 176static int bfin_flash_remove(struct platform_device *pdev)
176{ 177{
177 struct async_state *state = platform_get_drvdata(pdev); 178 struct async_state *state = platform_get_drvdata(pdev);
178 gpio_free(state->enet_flash_pin); 179 gpio_free(state->enet_flash_pin);
@@ -184,7 +185,7 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
184 185
185static struct platform_driver bfin_flash_driver = { 186static struct platform_driver bfin_flash_driver = {
186 .probe = bfin_flash_probe, 187 .probe = bfin_flash_probe,
187 .remove = __devexit_p(bfin_flash_remove), 188 .remove = bfin_flash_remove,
188 .driver = { 189 .driver = {
189 .name = DRIVER_NAME, 190 .name = DRIVER_NAME,
190 }, 191 },
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 3d0e762fa5f2..586a1c77e48a 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -112,8 +112,8 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
112} 112}
113 113
114 114
115static int __devinit ck804xrom_init_one (struct pci_dev *pdev, 115static int ck804xrom_init_one(struct pci_dev *pdev,
116 const struct pci_device_id *ent) 116 const struct pci_device_id *ent)
117{ 117{
118 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 118 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
119 u8 byte; 119 u8 byte;
@@ -320,7 +320,7 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
320} 320}
321 321
322 322
323static void __devexit ck804xrom_remove_one (struct pci_dev *pdev) 323static void ck804xrom_remove_one(struct pci_dev *pdev)
324{ 324{
325 struct ck804xrom_window *window = &ck804xrom_window; 325 struct ck804xrom_window *window = &ck804xrom_window;
326 326
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 08322b1c3e81..ff8681a25831 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -144,7 +144,7 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
144 pci_dev_put(window->pdev); 144 pci_dev_put(window->pdev);
145} 145}
146 146
147static int __devinit esb2rom_init_one(struct pci_dev *pdev, 147static int esb2rom_init_one(struct pci_dev *pdev,
148 const struct pci_device_id *ent) 148 const struct pci_device_id *ent)
149{ 149{
150 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 150 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
@@ -378,13 +378,13 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
378 return 0; 378 return 0;
379} 379}
380 380
381static void __devexit esb2rom_remove_one (struct pci_dev *pdev) 381static void esb2rom_remove_one(struct pci_dev *pdev)
382{ 382{
383 struct esb2rom_window *window = &esb2rom_window; 383 struct esb2rom_window *window = &esb2rom_window;
384 esb2rom_cleanup(window); 384 esb2rom_cleanup(window);
385} 385}
386 386
387static struct pci_device_id esb2rom_pci_tbl[] __devinitdata = { 387static struct pci_device_id esb2rom_pci_tbl[] = {
388 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, 388 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
389 PCI_ANY_ID, PCI_ANY_ID, }, 389 PCI_ANY_ID, PCI_ANY_ID, },
390 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, 390 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
deleted file mode 100644
index 956e2e4f30ea..000000000000
--- a/drivers/mtd/maps/fortunet.c
+++ /dev/null
@@ -1,277 +0,0 @@
1/* fortunet.c memory map
2 *
3 */
4
5#include <linux/module.h>
6#include <linux/types.h>
7#include <linux/kernel.h>
8#include <linux/init.h>
9#include <linux/string.h>
10
11#include <linux/mtd/mtd.h>
12#include <linux/mtd/map.h>
13#include <linux/mtd/partitions.h>
14
15#include <asm/io.h>
16
17#define MAX_NUM_REGIONS 4
18#define MAX_NUM_PARTITIONS 8
19
20#define DEF_WINDOW_ADDR_PHY 0x00000000
21#define DEF_WINDOW_SIZE 0x00800000 // 8 Mega Bytes
22
23#define MTD_FORTUNET_PK "MTD FortuNet: "
24
25#define MAX_NAME_SIZE 128
26
27struct map_region
28{
29 int window_addr_physical;
30 int altbankwidth;
31 struct map_info map_info;
32 struct mtd_info *mymtd;
33 struct mtd_partition parts[MAX_NUM_PARTITIONS];
34 char map_name[MAX_NAME_SIZE];
35 char parts_name[MAX_NUM_PARTITIONS][MAX_NAME_SIZE];
36};
37
38static struct map_region map_regions[MAX_NUM_REGIONS];
39static int map_regions_set[MAX_NUM_REGIONS] = {0,0,0,0};
40static int map_regions_parts[MAX_NUM_REGIONS] = {0,0,0,0};
41
42
43
44struct map_info default_map = {
45 .size = DEF_WINDOW_SIZE,
46 .bankwidth = 4,
47};
48
49static char * __init get_string_option(char *dest,int dest_size,char *sor)
50{
51 if(!dest_size)
52 return sor;
53 dest_size--;
54 while(*sor)
55 {
56 if(*sor==',')
57 {
58 sor++;
59 break;
60 }
61 else if(*sor=='\"')
62 {
63 sor++;
64 while(*sor)
65 {
66 if(*sor=='\"')
67 {
68 sor++;
69 break;
70 }
71 *dest = *sor;
72 dest++;
73 sor++;
74 dest_size--;
75 if(!dest_size)
76 {
77 *dest = 0;
78 return sor;
79 }
80 }
81 }
82 else
83 {
84 *dest = *sor;
85 dest++;
86 sor++;
87 dest_size--;
88 if(!dest_size)
89 {
90 *dest = 0;
91 return sor;
92 }
93 }
94 }
95 *dest = 0;
96 return sor;
97}
98
99static int __init MTD_New_Region(char *line)
100{
101 char string[MAX_NAME_SIZE];
102 int params[6];
103 get_options (get_string_option(string,sizeof(string),line),6,params);
104 if(params[0]<1)
105 {
106 printk(MTD_FORTUNET_PK "Bad parameters for MTD Region "
107 " name,region-number[,base,size,bankwidth,altbankwidth]\n");
108 return 1;
109 }
110 if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
111 {
112 printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
113 params[1],MAX_NUM_REGIONS-1);
114 return 1;
115 }
116 memset(&map_regions[params[1]],0,sizeof(map_regions[params[1]]));
117 memcpy(&map_regions[params[1]].map_info,
118 &default_map,sizeof(map_regions[params[1]].map_info));
119 map_regions_set[params[1]] = 1;
120 map_regions[params[1]].window_addr_physical = DEF_WINDOW_ADDR_PHY;
121 map_regions[params[1]].altbankwidth = 2;
122 map_regions[params[1]].mymtd = NULL;
123 map_regions[params[1]].map_info.name = map_regions[params[1]].map_name;
124 strcpy(map_regions[params[1]].map_info.name,string);
125 if(params[0]>1)
126 {
127 map_regions[params[1]].window_addr_physical = params[2];
128 }
129 if(params[0]>2)
130 {
131 map_regions[params[1]].map_info.size = params[3];
132 }
133 if(params[0]>3)
134 {
135 map_regions[params[1]].map_info.bankwidth = params[4];
136 }
137 if(params[0]>4)
138 {
139 map_regions[params[1]].altbankwidth = params[5];
140 }
141 return 1;
142}
143
144static int __init MTD_New_Partition(char *line)
145{
146 char string[MAX_NAME_SIZE];
147 int params[4];
148 get_options (get_string_option(string,sizeof(string),line),4,params);
149 if(params[0]<3)
150 {
151 printk(MTD_FORTUNET_PK "Bad parameters for MTD Partition "
152 " name,region-number,size,offset\n");
153 return 1;
154 }
155 if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
156 {
157 printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
158 params[1],MAX_NUM_REGIONS-1);
159 return 1;
160 }
161 if(map_regions_parts[params[1]]>=MAX_NUM_PARTITIONS)
162 {
163 printk(MTD_FORTUNET_PK "Out of space for partition in this region\n");
164 return 1;
165 }
166 map_regions[params[1]].parts[map_regions_parts[params[1]]].name =
167 map_regions[params[1]]. parts_name[map_regions_parts[params[1]]];
168 strcpy(map_regions[params[1]].parts[map_regions_parts[params[1]]].name,string);
169 map_regions[params[1]].parts[map_regions_parts[params[1]]].size =
170 params[2];
171 map_regions[params[1]].parts[map_regions_parts[params[1]]].offset =
172 params[3];
173 map_regions[params[1]].parts[map_regions_parts[params[1]]].mask_flags = 0;
174 map_regions_parts[params[1]]++;
175 return 1;
176}
177
178__setup("MTD_Region=", MTD_New_Region);
179__setup("MTD_Partition=", MTD_New_Partition);
180
181/* Backwards-spelling-compatibility */
182__setup("MTD_Partion=", MTD_New_Partition);
183
184static int __init init_fortunet(void)
185{
186 int ix,iy;
187 for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++)
188 {
189 if(map_regions_parts[ix]&&(!map_regions_set[ix]))
190 {
191 printk(MTD_FORTUNET_PK "Region %d is not setup (Setting to default)\n",
192 ix);
193 memset(&map_regions[ix],0,sizeof(map_regions[ix]));
194 memcpy(&map_regions[ix].map_info,&default_map,
195 sizeof(map_regions[ix].map_info));
196 map_regions_set[ix] = 1;
197 map_regions[ix].window_addr_physical = DEF_WINDOW_ADDR_PHY;
198 map_regions[ix].altbankwidth = 2;
199 map_regions[ix].mymtd = NULL;
200 map_regions[ix].map_info.name = map_regions[ix].map_name;
201 strcpy(map_regions[ix].map_info.name,"FORTUNET");
202 }
203 if(map_regions_set[ix])
204 {
205 iy++;
206 printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash device at physically "
207 " address %x size %x\n",
208 map_regions[ix].map_info.name,
209 map_regions[ix].window_addr_physical,
210 map_regions[ix].map_info.size);
211
212 map_regions[ix].map_info.phys = map_regions[ix].window_addr_physical,
213
214 map_regions[ix].map_info.virt =
215 ioremap_nocache(
216 map_regions[ix].window_addr_physical,
217 map_regions[ix].map_info.size);
218 if(!map_regions[ix].map_info.virt)
219 {
220 int j = 0;
221 printk(MTD_FORTUNET_PK "%s flash failed to ioremap!\n",
222 map_regions[ix].map_info.name);
223 for (j = 0 ; j < ix; j++)
224 iounmap(map_regions[j].map_info.virt);
225 return -ENXIO;
226 }
227 simple_map_init(&map_regions[ix].map_info);
228
229 printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash is virtually at: %x\n",
230 map_regions[ix].map_info.name,
231 map_regions[ix].map_info.virt);
232 map_regions[ix].mymtd = do_map_probe("cfi_probe",
233 &map_regions[ix].map_info);
234 if((!map_regions[ix].mymtd)&&(
235 map_regions[ix].altbankwidth!=map_regions[ix].map_info.bankwidth))
236 {
237 printk(KERN_NOTICE MTD_FORTUNET_PK "Trying alternate bankwidth "
238 "for %s flash.\n",
239 map_regions[ix].map_info.name);
240 map_regions[ix].map_info.bankwidth =
241 map_regions[ix].altbankwidth;
242 map_regions[ix].mymtd = do_map_probe("cfi_probe",
243 &map_regions[ix].map_info);
244 }
245 map_regions[ix].mymtd->owner = THIS_MODULE;
246 mtd_device_register(map_regions[ix].mymtd,
247 map_regions[ix].parts,
248 map_regions_parts[ix]);
249 }
250 }
251 if(iy)
252 return 0;
253 return -ENXIO;
254}
255
256static void __exit cleanup_fortunet(void)
257{
258 int ix;
259 for(ix=0;ix<MAX_NUM_REGIONS;ix++)
260 {
261 if(map_regions_set[ix])
262 {
263 if( map_regions[ix].mymtd )
264 {
265 mtd_device_unregister(map_regions[ix].mymtd);
266 map_destroy( map_regions[ix].mymtd );
267 }
268 iounmap((void *)map_regions[ix].map_info.virt);
269 }
270 }
271}
272
273module_init(init_fortunet);
274module_exit(cleanup_fortunet);
275
276MODULE_AUTHOR("FortuNet, Inc.");
277MODULE_DESCRIPTION("MTD map driver for FortuNet boards");
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index e4de96ba52b3..7b643de2500b 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -26,7 +26,8 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/types.h> 27#include <linux/types.h>
28 28
29#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); }) 29#define pr_devinit(fmt, args...) \
30 ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
30 31
31#define DRIVER_NAME "gpio-addr-flash" 32#define DRIVER_NAME "gpio-addr-flash"
32#define PFX DRIVER_NAME ": " 33#define PFX DRIVER_NAME ": "
@@ -142,7 +143,8 @@ static void gf_write(struct map_info *map, map_word d1, unsigned long ofs)
142 * 143 *
143 * See gf_copy_from() caveat. 144 * See gf_copy_from() caveat.
144 */ 145 */
145static void gf_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) 146static void gf_copy_to(struct map_info *map, unsigned long to,
147 const void *from, ssize_t len)
146{ 148{
147 struct async_state *state = gf_map_info_to_state(map); 149 struct async_state *state = gf_map_info_to_state(map);
148 150
@@ -185,7 +187,7 @@ static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
185 * ... 187 * ...
186 * }; 188 * };
187 */ 189 */
188static int __devinit gpio_flash_probe(struct platform_device *pdev) 190static int gpio_flash_probe(struct platform_device *pdev)
189{ 191{
190 size_t i, arr_size; 192 size_t i, arr_size;
191 struct physmap_flash_data *pdata; 193 struct physmap_flash_data *pdata;
@@ -258,7 +260,7 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
258 return 0; 260 return 0;
259} 261}
260 262
261static int __devexit gpio_flash_remove(struct platform_device *pdev) 263static int gpio_flash_remove(struct platform_device *pdev)
262{ 264{
263 struct async_state *state = platform_get_drvdata(pdev); 265 struct async_state *state = platform_get_drvdata(pdev);
264 size_t i = 0; 266 size_t i = 0;
@@ -273,7 +275,7 @@ static int __devexit gpio_flash_remove(struct platform_device *pdev)
273 275
274static struct platform_driver gpio_flash_driver = { 276static struct platform_driver gpio_flash_driver = {
275 .probe = gpio_flash_probe, 277 .probe = gpio_flash_probe,
276 .remove = __devexit_p(gpio_flash_remove), 278 .remove = gpio_flash_remove,
277 .driver = { 279 .driver = {
278 .name = DRIVER_NAME, 280 .name = DRIVER_NAME,
279 }, 281 },
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 6689dcb3124d..c7478e18f485 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -84,8 +84,8 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
84} 84}
85 85
86 86
87static int __devinit ichxrom_init_one (struct pci_dev *pdev, 87static int ichxrom_init_one(struct pci_dev *pdev,
88 const struct pci_device_id *ent) 88 const struct pci_device_id *ent)
89{ 89{
90 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 90 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
91 struct ichxrom_window *window = &ichxrom_window; 91 struct ichxrom_window *window = &ichxrom_window;
@@ -315,13 +315,13 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
315} 315}
316 316
317 317
318static void __devexit ichxrom_remove_one (struct pci_dev *pdev) 318static void ichxrom_remove_one(struct pci_dev *pdev)
319{ 319{
320 struct ichxrom_window *window = &ichxrom_window; 320 struct ichxrom_window *window = &ichxrom_window;
321 ichxrom_cleanup(window); 321 ichxrom_cleanup(window);
322} 322}
323 323
324static struct pci_device_id ichxrom_pci_tbl[] __devinitdata = { 324static struct pci_device_id ichxrom_pci_tbl[] = {
325 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, 325 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
326 PCI_ANY_ID, PCI_ANY_ID, }, 326 PCI_ANY_ID, PCI_ANY_ID, },
327 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, 327 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 93f03175c82d..3ee2ad1dcbe7 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -63,24 +63,24 @@ struct vr_nor_mtd {
63#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */ 63#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
64#define TIMING_MASK 0x3FFF0000 64#define TIMING_MASK 0x3FFF0000
65 65
66static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p) 66static void vr_nor_destroy_partitions(struct vr_nor_mtd *p)
67{ 67{
68 mtd_device_unregister(p->info); 68 mtd_device_unregister(p->info);
69} 69}
70 70
71static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p) 71static int vr_nor_init_partitions(struct vr_nor_mtd *p)
72{ 72{
73 /* register the flash bank */ 73 /* register the flash bank */
74 /* partition the flash bank */ 74 /* partition the flash bank */
75 return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0); 75 return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
76} 76}
77 77
78static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) 78static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
79{ 79{
80 map_destroy(p->info); 80 map_destroy(p->info);
81} 81}
82 82
83static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p) 83static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
84{ 84{
85 static const char *probe_types[] = 85 static const char *probe_types[] =
86 { "cfi_probe", "jedec_probe", NULL }; 86 { "cfi_probe", "jedec_probe", NULL };
@@ -96,7 +96,7 @@ static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p)
96 return 0; 96 return 0;
97} 97}
98 98
99static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p) 99static void vr_nor_destroy_maps(struct vr_nor_mtd *p)
100{ 100{
101 unsigned int exp_timing_cs0; 101 unsigned int exp_timing_cs0;
102 102
@@ -116,7 +116,7 @@ static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p)
116 * Initialize the map_info structure and map the flash. 116 * Initialize the map_info structure and map the flash.
117 * Returns 0 on success, nonzero otherwise. 117 * Returns 0 on success, nonzero otherwise.
118 */ 118 */
119static int __devinit vr_nor_init_maps(struct vr_nor_mtd *p) 119static int vr_nor_init_maps(struct vr_nor_mtd *p)
120{ 120{
121 unsigned long csr_phys, csr_len; 121 unsigned long csr_phys, csr_len;
122 unsigned long win_phys, win_len; 122 unsigned long win_phys, win_len;
@@ -176,7 +176,7 @@ static struct pci_device_id vr_nor_pci_ids[] = {
176 {0,} 176 {0,}
177}; 177};
178 178
179static void __devexit vr_nor_pci_remove(struct pci_dev *dev) 179static void vr_nor_pci_remove(struct pci_dev *dev)
180{ 180{
181 struct vr_nor_mtd *p = pci_get_drvdata(dev); 181 struct vr_nor_mtd *p = pci_get_drvdata(dev);
182 182
@@ -189,7 +189,7 @@ static void __devexit vr_nor_pci_remove(struct pci_dev *dev)
189 pci_disable_device(dev); 189 pci_disable_device(dev);
190} 190}
191 191
192static int __devinit 192static int
193vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) 193vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
194{ 194{
195 struct vr_nor_mtd *p = NULL; 195 struct vr_nor_mtd *p = NULL;
@@ -256,7 +256,7 @@ vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
256static struct pci_driver vr_nor_pci_driver = { 256static struct pci_driver vr_nor_pci_driver = {
257 .name = DRV_NAME, 257 .name = DRV_NAME,
258 .probe = vr_nor_pci_probe, 258 .probe = vr_nor_pci_probe,
259 .remove = __devexit_p(vr_nor_pci_remove), 259 .remove = vr_nor_pci_remove,
260 .id_table = vr_nor_pci_ids, 260 .id_table = vr_nor_pci_ids,
261}; 261};
262 262
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index c03456f17004..3c3c791eb96a 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -45,7 +45,7 @@ struct ltq_mtd {
45}; 45};
46 46
47static const char ltq_map_name[] = "ltq_nor"; 47static const char ltq_map_name[] = "ltq_nor";
48static const char *ltq_probe_types[] __devinitconst = { 48static const char *ltq_probe_types[] = {
49 "cmdlinepart", "ofpart", NULL }; 49 "cmdlinepart", "ofpart", NULL };
50 50
51static map_word 51static map_word
@@ -109,7 +109,7 @@ ltq_copy_to(struct map_info *map, unsigned long to,
109 spin_unlock_irqrestore(&ebu_lock, flags); 109 spin_unlock_irqrestore(&ebu_lock, flags);
110} 110}
111 111
112static int __devinit 112static int
113ltq_mtd_probe(struct platform_device *pdev) 113ltq_mtd_probe(struct platform_device *pdev)
114{ 114{
115 struct mtd_part_parser_data ppdata; 115 struct mtd_part_parser_data ppdata;
@@ -185,7 +185,7 @@ err_out:
185 return err; 185 return err;
186} 186}
187 187
188static int __devexit 188static int
189ltq_mtd_remove(struct platform_device *pdev) 189ltq_mtd_remove(struct platform_device *pdev)
190{ 190{
191 struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev); 191 struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
@@ -209,7 +209,7 @@ MODULE_DEVICE_TABLE(of, ltq_mtd_match);
209 209
210static struct platform_driver ltq_mtd_driver = { 210static struct platform_driver ltq_mtd_driver = {
211 .probe = ltq_mtd_probe, 211 .probe = ltq_mtd_probe,
212 .remove = __devexit_p(ltq_mtd_remove), 212 .remove = ltq_mtd_remove,
213 .driver = { 213 .driver = {
214 .name = "ltq-nor", 214 .name = "ltq-nor",
215 .owner = THIS_MODULE, 215 .owner = THIS_MODULE,
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 3c7ad17fca78..ab0fead56b83 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -125,7 +125,7 @@ static int latch_addr_flash_remove(struct platform_device *dev)
125 return 0; 125 return 0;
126} 126}
127 127
128static int __devinit latch_addr_flash_probe(struct platform_device *dev) 128static int latch_addr_flash_probe(struct platform_device *dev)
129{ 129{
130 struct latch_addr_flash_data *latch_addr_data; 130 struct latch_addr_flash_data *latch_addr_data;
131 struct latch_addr_flash_info *info; 131 struct latch_addr_flash_info *info;
@@ -218,7 +218,7 @@ done:
218 218
219static struct platform_driver latch_addr_flash_driver = { 219static struct platform_driver latch_addr_flash_driver = {
220 .probe = latch_addr_flash_probe, 220 .probe = latch_addr_flash_probe,
221 .remove = __devexit_p(latch_addr_flash_remove), 221 .remove = latch_addr_flash_remove,
222 .driver = { 222 .driver = {
223 .name = DRIVER_NAME, 223 .name = DRIVER_NAME,
224 }, 224 },
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 1c30c1a307f4..ed82914966f5 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -253,7 +253,7 @@ static struct pci_device_id mtd_pci_ids[] = {
253 * Generic code follows. 253 * Generic code follows.
254 */ 254 */
255 255
256static int __devinit 256static int
257mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) 257mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
258{ 258{
259 struct mtd_pci_info *info = (struct mtd_pci_info *)id->driver_data; 259 struct mtd_pci_info *info = (struct mtd_pci_info *)id->driver_data;
@@ -308,7 +308,7 @@ out:
308 return err; 308 return err;
309} 309}
310 310
311static void __devexit 311static void
312mtd_pci_remove(struct pci_dev *dev) 312mtd_pci_remove(struct pci_dev *dev)
313{ 313{
314 struct mtd_info *mtd = pci_get_drvdata(dev); 314 struct mtd_info *mtd = pci_get_drvdata(dev);
@@ -326,7 +326,7 @@ mtd_pci_remove(struct pci_dev *dev)
326static struct pci_driver mtd_pci_driver = { 326static struct pci_driver mtd_pci_driver = {
327 .name = "MTD PCI", 327 .name = "MTD PCI",
328 .probe = mtd_pci_probe, 328 .probe = mtd_pci_probe,
329 .remove = __devexit_p(mtd_pci_remove), 329 .remove = mtd_pci_remove,
330 .id_table = mtd_pci_ids, 330 .id_table = mtd_pci_ids,
331}; 331};
332 332
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 6f19acadb06c..37cdc201652f 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -77,7 +77,7 @@ static int of_flash_remove(struct platform_device *dev)
77/* Helper function to handle probing of the obsolete "direct-mapped" 77/* Helper function to handle probing of the obsolete "direct-mapped"
78 * compatible binding, which has an extra "probe-type" property 78 * compatible binding, which has an extra "probe-type" property
79 * describing the type of flash probe necessary. */ 79 * describing the type of flash probe necessary. */
80static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev, 80static struct mtd_info *obsolete_probe(struct platform_device *dev,
81 struct map_info *map) 81 struct map_info *map)
82{ 82{
83 struct device_node *dp = dev->dev.of_node; 83 struct device_node *dp = dev->dev.of_node;
@@ -116,7 +116,7 @@ static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
116 information. */ 116 information. */
117static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", 117static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot",
118 "ofpart", "ofoldpart", NULL }; 118 "ofpart", "ofoldpart", NULL };
119static const char ** __devinit of_get_probes(struct device_node *dp) 119static const char **of_get_probes(struct device_node *dp)
120{ 120{
121 const char *cp; 121 const char *cp;
122 int cplen; 122 int cplen;
@@ -145,14 +145,14 @@ static const char ** __devinit of_get_probes(struct device_node *dp)
145 return res; 145 return res;
146} 146}
147 147
148static void __devinit of_free_probes(const char **probes) 148static void of_free_probes(const char **probes)
149{ 149{
150 if (probes != part_probe_types_def) 150 if (probes != part_probe_types_def)
151 kfree(probes); 151 kfree(probes);
152} 152}
153 153
154static struct of_device_id of_flash_match[]; 154static struct of_device_id of_flash_match[];
155static int __devinit of_flash_probe(struct platform_device *dev) 155static int of_flash_probe(struct platform_device *dev)
156{ 156{
157 const char **part_probe_types; 157 const char **part_probe_types;
158 const struct of_device_id *match; 158 const struct of_device_id *match;
@@ -170,6 +170,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
170 resource_size_t res_size; 170 resource_size_t res_size;
171 struct mtd_part_parser_data ppdata; 171 struct mtd_part_parser_data ppdata;
172 bool map_indirect; 172 bool map_indirect;
173 const char *mtd_name;
173 174
174 match = of_match_device(of_flash_match, &dev->dev); 175 match = of_match_device(of_flash_match, &dev->dev);
175 if (!match) 176 if (!match)
@@ -178,6 +179,8 @@ static int __devinit of_flash_probe(struct platform_device *dev)
178 179
179 reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); 180 reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
180 181
182 of_property_read_string(dp, "linux,mtd-name", &mtd_name);
183
181 /* 184 /*
182 * Get number of "reg" tuples. Scan for MTD devices on area's 185 * Get number of "reg" tuples. Scan for MTD devices on area's
183 * described by each "reg" region. This makes it possible (including 186 * described by each "reg" region. This makes it possible (including
@@ -234,7 +237,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
234 goto err_out; 237 goto err_out;
235 } 238 }
236 239
237 info->list[i].map.name = dev_name(&dev->dev); 240 info->list[i].map.name = mtd_name ?: dev_name(&dev->dev);
238 info->list[i].map.phys = res.start; 241 info->list[i].map.phys = res.start;
239 info->list[i].map.size = res_size; 242 info->list[i].map.size = res_size;
240 info->list[i].map.bankwidth = be32_to_cpup(width); 243 info->list[i].map.bankwidth = be32_to_cpup(width);
@@ -282,6 +285,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
282 } 285 }
283 286
284 err = 0; 287 err = 0;
288 info->cmtd = NULL;
285 if (info->list_size == 1) { 289 if (info->list_size == 1) {
286 info->cmtd = info->list[0].mtd; 290 info->cmtd = info->list[0].mtd;
287 } else if (info->list_size > 1) { 291 } else if (info->list_size > 1) {
@@ -290,9 +294,10 @@ static int __devinit of_flash_probe(struct platform_device *dev)
290 */ 294 */
291 info->cmtd = mtd_concat_create(mtd_list, info->list_size, 295 info->cmtd = mtd_concat_create(mtd_list, info->list_size,
292 dev_name(&dev->dev)); 296 dev_name(&dev->dev));
293 if (info->cmtd == NULL)
294 err = -ENXIO;
295 } 297 }
298 if (info->cmtd == NULL)
299 err = -ENXIO;
300
296 if (err) 301 if (err)
297 goto err_out; 302 goto err_out;
298 303
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index 65bd1cd4d627..afea93b515d5 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -58,7 +58,7 @@ static void pismo_set_vpp(struct platform_device *pdev, int on)
58 pismo->vpp(pismo->vpp_data, on); 58 pismo->vpp(pismo->vpp_data, on);
59} 59}
60 60
61static unsigned int __devinit pismo_width_to_bytes(unsigned int width) 61static unsigned int pismo_width_to_bytes(unsigned int width)
62{ 62{
63 width &= 15; 63 width &= 15;
64 if (width > 2) 64 if (width > 2)
@@ -66,7 +66,7 @@ static unsigned int __devinit pismo_width_to_bytes(unsigned int width)
66 return 1 << width; 66 return 1 << width;
67} 67}
68 68
69static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf, 69static int pismo_eeprom_read(struct i2c_client *client, void *buf,
70 u8 addr, size_t size) 70 u8 addr, size_t size)
71{ 71{
72 int ret; 72 int ret;
@@ -88,7 +88,7 @@ static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf,
88 return ret == ARRAY_SIZE(msg) ? size : -EIO; 88 return ret == ARRAY_SIZE(msg) ? size : -EIO;
89} 89}
90 90
91static int __devinit pismo_add_device(struct pismo_data *pismo, int i, 91static int pismo_add_device(struct pismo_data *pismo, int i,
92 struct pismo_mem *region, const char *name, void *pdata, size_t psize) 92 struct pismo_mem *region, const char *name, void *pdata, size_t psize)
93{ 93{
94 struct platform_device *dev; 94 struct platform_device *dev;
@@ -129,7 +129,7 @@ static int __devinit pismo_add_device(struct pismo_data *pismo, int i,
129 return ret; 129 return ret;
130} 130}
131 131
132static int __devinit pismo_add_nor(struct pismo_data *pismo, int i, 132static int pismo_add_nor(struct pismo_data *pismo, int i,
133 struct pismo_mem *region) 133 struct pismo_mem *region)
134{ 134{
135 struct physmap_flash_data data = { 135 struct physmap_flash_data data = {
@@ -143,7 +143,7 @@ static int __devinit pismo_add_nor(struct pismo_data *pismo, int i,
143 &data, sizeof(data)); 143 &data, sizeof(data));
144} 144}
145 145
146static int __devinit pismo_add_sram(struct pismo_data *pismo, int i, 146static int pismo_add_sram(struct pismo_data *pismo, int i,
147 struct pismo_mem *region) 147 struct pismo_mem *region)
148{ 148{
149 struct platdata_mtd_ram data = { 149 struct platdata_mtd_ram data = {
@@ -154,7 +154,7 @@ static int __devinit pismo_add_sram(struct pismo_data *pismo, int i,
154 &data, sizeof(data)); 154 &data, sizeof(data));
155} 155}
156 156
157static void __devinit pismo_add_one(struct pismo_data *pismo, int i, 157static void pismo_add_one(struct pismo_data *pismo, int i,
158 const struct pismo_cs_block *cs, phys_addr_t base) 158 const struct pismo_cs_block *cs, phys_addr_t base)
159{ 159{
160 struct device *dev = &pismo->client->dev; 160 struct device *dev = &pismo->client->dev;
@@ -197,7 +197,7 @@ static void __devinit pismo_add_one(struct pismo_data *pismo, int i,
197 } 197 }
198} 198}
199 199
200static int __devexit pismo_remove(struct i2c_client *client) 200static int pismo_remove(struct i2c_client *client)
201{ 201{
202 struct pismo_data *pismo = i2c_get_clientdata(client); 202 struct pismo_data *pismo = i2c_get_clientdata(client);
203 int i; 203 int i;
@@ -210,7 +210,7 @@ static int __devexit pismo_remove(struct i2c_client *client)
210 return 0; 210 return 0;
211} 211}
212 212
213static int __devinit pismo_probe(struct i2c_client *client, 213static int pismo_probe(struct i2c_client *client,
214 const struct i2c_device_id *id) 214 const struct i2c_device_id *id)
215{ 215{
216 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); 216 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
@@ -267,7 +267,7 @@ static struct i2c_driver pismo_driver = {
267 .owner = THIS_MODULE, 267 .owner = THIS_MODULE,
268 }, 268 },
269 .probe = pismo_probe, 269 .probe = pismo_probe,
270 .remove = __devexit_p(pismo_remove), 270 .remove = pismo_remove,
271 .id_table = pismo_id, 271 .id_table = pismo_id,
272}; 272};
273 273
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 81884c277405..43e3dbb976d9 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -49,7 +49,7 @@ struct pxa2xx_flash_info {
49static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; 49static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
50 50
51 51
52static int __devinit pxa2xx_flash_probe(struct platform_device *pdev) 52static int pxa2xx_flash_probe(struct platform_device *pdev)
53{ 53{
54 struct flash_platform_data *flash = pdev->dev.platform_data; 54 struct flash_platform_data *flash = pdev->dev.platform_data;
55 struct pxa2xx_flash_info *info; 55 struct pxa2xx_flash_info *info;
@@ -105,7 +105,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
105 return 0; 105 return 0;
106} 106}
107 107
108static int __devexit pxa2xx_flash_remove(struct platform_device *dev) 108static int pxa2xx_flash_remove(struct platform_device *dev)
109{ 109{
110 struct pxa2xx_flash_info *info = platform_get_drvdata(dev); 110 struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
111 111
@@ -139,7 +139,7 @@ static struct platform_driver pxa2xx_flash_driver = {
139 .owner = THIS_MODULE, 139 .owner = THIS_MODULE,
140 }, 140 },
141 .probe = pxa2xx_flash_probe, 141 .probe = pxa2xx_flash_probe,
142 .remove = __devexit_p(pxa2xx_flash_remove), 142 .remove = pxa2xx_flash_remove,
143 .shutdown = pxa2xx_flash_shutdown, 143 .shutdown = pxa2xx_flash_shutdown,
144}; 144};
145 145
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index a675bdbcb0fe..f694417cf7e6 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -149,8 +149,8 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
149 plat->exit(); 149 plat->exit();
150} 150}
151 151
152static struct sa_info *__devinit 152static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
153sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat) 153 struct flash_platform_data *plat)
154{ 154{
155 struct sa_info *info; 155 struct sa_info *info;
156 int nr, size, i, ret = 0; 156 int nr, size, i, ret = 0;
@@ -246,7 +246,7 @@ sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
246 246
247static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; 247static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
248 248
249static int __devinit sa1100_mtd_probe(struct platform_device *pdev) 249static int sa1100_mtd_probe(struct platform_device *pdev)
250{ 250{
251 struct flash_platform_data *plat = pdev->dev.platform_data; 251 struct flash_platform_data *plat = pdev->dev.platform_data;
252 struct sa_info *info; 252 struct sa_info *info;
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 9dcbc684abdb..71796137e97b 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -69,7 +69,7 @@ static struct map_info scb2_map = {
69}; 69};
70static int region_fail; 70static int region_fail;
71 71
72static int __devinit 72static int
73scb2_fixup_mtd(struct mtd_info *mtd) 73scb2_fixup_mtd(struct mtd_info *mtd)
74{ 74{
75 int i; 75 int i;
@@ -133,7 +133,7 @@ scb2_fixup_mtd(struct mtd_info *mtd)
133/* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */ 133/* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */
134#define CSB5_FCR 0x41 134#define CSB5_FCR 0x41
135#define CSB5_FCR_DECODE_ALL 0x0e 135#define CSB5_FCR_DECODE_ALL 0x0e
136static int __devinit 136static int
137scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent) 137scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
138{ 138{
139 u8 reg; 139 u8 reg;
@@ -197,7 +197,7 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
197 return 0; 197 return 0;
198} 198}
199 199
200static void __devexit 200static void
201scb2_flash_remove(struct pci_dev *dev) 201scb2_flash_remove(struct pci_dev *dev)
202{ 202{
203 if (!scb2_mtd) 203 if (!scb2_mtd)
@@ -231,7 +231,7 @@ static struct pci_driver scb2_flash_driver = {
231 .name = "Intel SCB2 BIOS Flash", 231 .name = "Intel SCB2 BIOS Flash",
232 .id_table = scb2_flash_pci_ids, 232 .id_table = scb2_flash_pci_ids,
233 .probe = scb2_flash_probe, 233 .probe = scb2_flash_probe,
234 .remove = __devexit_p(scb2_flash_remove), 234 .remove = scb2_flash_remove,
235}; 235};
236 236
237module_pci_driver(scb2_flash_driver); 237module_pci_driver(scb2_flash_driver);
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 175e537b444f..d467f3b11c96 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -108,7 +108,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
108 return 0; 108 return 0;
109} 109}
110 110
111static int __devinit uflash_probe(struct platform_device *op) 111static int uflash_probe(struct platform_device *op)
112{ 112{
113 struct device_node *dp = op->dev.of_node; 113 struct device_node *dp = op->dev.of_node;
114 114
@@ -121,7 +121,7 @@ static int __devinit uflash_probe(struct platform_device *op)
121 return uflash_devinit(op, dp); 121 return uflash_devinit(op, dp);
122} 122}
123 123
124static int __devexit uflash_remove(struct platform_device *op) 124static int uflash_remove(struct platform_device *op)
125{ 125{
126 struct uflash_dev *up = dev_get_drvdata(&op->dev); 126 struct uflash_dev *up = dev_get_drvdata(&op->dev);
127 127
@@ -155,7 +155,7 @@ static struct platform_driver uflash_driver = {
155 .of_match_table = uflash_match, 155 .of_match_table = uflash_match,
156 }, 156 },
157 .probe = uflash_probe, 157 .probe = uflash_probe,
158 .remove = __devexit_p(uflash_remove), 158 .remove = uflash_remove,
159}; 159};
160 160
161module_platform_driver(uflash_driver); 161module_platform_driver(uflash_driver);
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 2e2b0945edc7..6b223cfe92b7 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -596,7 +596,7 @@ fail_name:
596} 596}
597 597
598/* Handles very basic info about the flash, queries for details */ 598/* Handles very basic info about the flash, queries for details */
599static int __devinit vmu_connect(struct maple_device *mdev) 599static int vmu_connect(struct maple_device *mdev)
600{ 600{
601 unsigned long test_flash_data, basic_flash_data; 601 unsigned long test_flash_data, basic_flash_data;
602 int c, error; 602 int c, error;
@@ -690,7 +690,7 @@ fail_nomem:
690 return error; 690 return error;
691} 691}
692 692
693static void __devexit vmu_disconnect(struct maple_device *mdev) 693static void vmu_disconnect(struct maple_device *mdev)
694{ 694{
695 struct memcard *card; 695 struct memcard *card;
696 struct mdev_part *mpart; 696 struct mdev_part *mpart;
@@ -772,7 +772,7 @@ static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
772} 772}
773 773
774 774
775static int __devinit probe_maple_vmu(struct device *dev) 775static int probe_maple_vmu(struct device *dev)
776{ 776{
777 int error; 777 int error;
778 struct maple_device *mdev = to_maple_dev(dev); 778 struct maple_device *mdev = to_maple_dev(dev);
@@ -789,7 +789,7 @@ static int __devinit probe_maple_vmu(struct device *dev)
789 return 0; 789 return 0;
790} 790}
791 791
792static int __devexit remove_maple_vmu(struct device *dev) 792static int remove_maple_vmu(struct device *dev)
793{ 793{
794 struct maple_device *mdev = to_maple_dev(dev); 794 struct maple_device *mdev = to_maple_dev(dev);
795 795
@@ -802,7 +802,7 @@ static struct maple_driver vmu_flash_driver = {
802 .drv = { 802 .drv = {
803 .name = "Dreamcast_visual_memory", 803 .name = "Dreamcast_visual_memory",
804 .probe = probe_maple_vmu, 804 .probe = probe_maple_vmu,
805 .remove = __devexit_p(remove_maple_vmu), 805 .remove = remove_maple_vmu,
806 }, 806 },
807}; 807};
808 808
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f1f06715d4e0..5ad39bb5ab4c 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -32,7 +32,6 @@
32#include <linux/hdreg.h> 32#include <linux/hdreg.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35#include <linux/kthread.h>
36#include <asm/uaccess.h> 35#include <asm/uaccess.h>
37 36
38#include "mtdcore.h" 37#include "mtdcore.h"
@@ -121,16 +120,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
121 120
122int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev) 121int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
123{ 122{
124 if (kthread_should_stop())
125 return 1;
126
127 return dev->bg_stop; 123 return dev->bg_stop;
128} 124}
129EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background); 125EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
130 126
131static int mtd_blktrans_thread(void *arg) 127static void mtd_blktrans_work(struct work_struct *work)
132{ 128{
133 struct mtd_blktrans_dev *dev = arg; 129 struct mtd_blktrans_dev *dev =
130 container_of(work, struct mtd_blktrans_dev, work);
134 struct mtd_blktrans_ops *tr = dev->tr; 131 struct mtd_blktrans_ops *tr = dev->tr;
135 struct request_queue *rq = dev->rq; 132 struct request_queue *rq = dev->rq;
136 struct request *req = NULL; 133 struct request *req = NULL;
@@ -138,7 +135,7 @@ static int mtd_blktrans_thread(void *arg)
138 135
139 spin_lock_irq(rq->queue_lock); 136 spin_lock_irq(rq->queue_lock);
140 137
141 while (!kthread_should_stop()) { 138 while (1) {
142 int res; 139 int res;
143 140
144 dev->bg_stop = false; 141 dev->bg_stop = false;
@@ -156,15 +153,7 @@ static int mtd_blktrans_thread(void *arg)
156 background_done = !dev->bg_stop; 153 background_done = !dev->bg_stop;
157 continue; 154 continue;
158 } 155 }
159 set_current_state(TASK_INTERRUPTIBLE); 156 break;
160
161 if (kthread_should_stop())
162 set_current_state(TASK_RUNNING);
163
164 spin_unlock_irq(rq->queue_lock);
165 schedule();
166 spin_lock_irq(rq->queue_lock);
167 continue;
168 } 157 }
169 158
170 spin_unlock_irq(rq->queue_lock); 159 spin_unlock_irq(rq->queue_lock);
@@ -185,8 +174,6 @@ static int mtd_blktrans_thread(void *arg)
185 __blk_end_request_all(req, -EIO); 174 __blk_end_request_all(req, -EIO);
186 175
187 spin_unlock_irq(rq->queue_lock); 176 spin_unlock_irq(rq->queue_lock);
188
189 return 0;
190} 177}
191 178
192static void mtd_blktrans_request(struct request_queue *rq) 179static void mtd_blktrans_request(struct request_queue *rq)
@@ -199,10 +186,8 @@ static void mtd_blktrans_request(struct request_queue *rq)
199 if (!dev) 186 if (!dev)
200 while ((req = blk_fetch_request(rq)) != NULL) 187 while ((req = blk_fetch_request(rq)) != NULL)
201 __blk_end_request_all(req, -ENODEV); 188 __blk_end_request_all(req, -ENODEV);
202 else { 189 else
203 dev->bg_stop = true; 190 queue_work(dev->wq, &dev->work);
204 wake_up_process(dev->thread);
205 }
206} 191}
207 192
208static int blktrans_open(struct block_device *bdev, fmode_t mode) 193static int blktrans_open(struct block_device *bdev, fmode_t mode)
@@ -325,7 +310,7 @@ unlock:
325 return ret; 310 return ret;
326} 311}
327 312
328static const struct block_device_operations mtd_blktrans_ops = { 313static const struct block_device_operations mtd_block_ops = {
329 .owner = THIS_MODULE, 314 .owner = THIS_MODULE,
330 .open = blktrans_open, 315 .open = blktrans_open,
331 .release = blktrans_release, 316 .release = blktrans_release,
@@ -401,7 +386,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
401 gd->private_data = new; 386 gd->private_data = new;
402 gd->major = tr->major; 387 gd->major = tr->major;
403 gd->first_minor = (new->devnum) << tr->part_bits; 388 gd->first_minor = (new->devnum) << tr->part_bits;
404 gd->fops = &mtd_blktrans_ops; 389 gd->fops = &mtd_block_ops;
405 390
406 if (tr->part_bits) 391 if (tr->part_bits)
407 if (new->devnum < 26) 392 if (new->devnum < 26)
@@ -437,14 +422,13 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
437 422
438 gd->queue = new->rq; 423 gd->queue = new->rq;
439 424
440 /* Create processing thread */ 425 /* Create processing workqueue */
441 /* TODO: workqueue ? */ 426 new->wq = alloc_workqueue("%s%d", 0, 0,
442 new->thread = kthread_run(mtd_blktrans_thread, new, 427 tr->name, new->mtd->index);
443 "%s%d", tr->name, new->mtd->index); 428 if (!new->wq)
444 if (IS_ERR(new->thread)) {
445 ret = PTR_ERR(new->thread);
446 goto error4; 429 goto error4;
447 } 430 INIT_WORK(&new->work, mtd_blktrans_work);
431
448 gd->driverfs_dev = &new->mtd->dev; 432 gd->driverfs_dev = &new->mtd->dev;
449 433
450 if (new->readonly) 434 if (new->readonly)
@@ -484,9 +468,8 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
484 /* Stop new requests to arrive */ 468 /* Stop new requests to arrive */
485 del_gendisk(old->disk); 469 del_gendisk(old->disk);
486 470
487 471 /* Stop workqueue. This will perform any pending request. */
488 /* Stop the thread */ 472 destroy_workqueue(old->wq);
489 kthread_stop(old->thread);
490 473
491 /* Kill current requests */ 474 /* Kill current requests */
492 spin_lock_irqsave(&old->queue_lock, flags); 475 spin_lock_irqsave(&old->queue_lock, flags);
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index f5b3f91fa1cc..97bb8f6304d4 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -271,7 +271,7 @@ static void find_next_position(struct mtdoops_context *cxt)
271 271
272 if (count[0] == 0xffffffff && count[1] == 0xffffffff) 272 if (count[0] == 0xffffffff && count[1] == 0xffffffff)
273 mark_page_unused(cxt, page); 273 mark_page_unused(cxt, page);
274 if (count[0] == 0xffffffff) 274 if (count[0] == 0xffffffff || count[1] != MTDOOPS_KERNMSG_MAGIC)
275 continue; 275 continue;
276 if (maxcount == 0xffffffff) { 276 if (maxcount == 0xffffffff) {
277 maxcount = count[0]; 277 maxcount = count[0];
@@ -289,14 +289,13 @@ static void find_next_position(struct mtdoops_context *cxt)
289 } 289 }
290 } 290 }
291 if (maxcount == 0xffffffff) { 291 if (maxcount == 0xffffffff) {
292 cxt->nextpage = 0; 292 cxt->nextpage = cxt->oops_pages - 1;
293 cxt->nextcount = 1; 293 cxt->nextcount = 0;
294 schedule_work(&cxt->work_erase); 294 }
295 return; 295 else {
296 cxt->nextpage = maxpos;
297 cxt->nextcount = maxcount;
296 } 298 }
297
298 cxt->nextpage = maxpos;
299 cxt->nextcount = maxcount;
300 299
301 mtdoops_inc_counter(cxt); 300 mtdoops_inc_counter(cxt);
302} 301}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index dae191b3c081..5819eb575210 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -50,16 +50,30 @@ config MTD_NAND_MUSEUM_IDS
50 of these chips were reused by later, larger chips. 50 of these chips were reused by later, larger chips.
51 51
52config MTD_NAND_DENALI 52config MTD_NAND_DENALI
53 depends on PCI 53 tristate "Support Denali NAND controller"
54 help
55 Enable support for the Denali NAND controller. This should be
56 combined with either the PCI or platform drivers to provide device
57 registration.
58
59config MTD_NAND_DENALI_PCI
54 tristate "Support Denali NAND controller on Intel Moorestown" 60 tristate "Support Denali NAND controller on Intel Moorestown"
61 depends on PCI && MTD_NAND_DENALI
55 help 62 help
56 Enable the driver for NAND flash on Intel Moorestown, using the 63 Enable the driver for NAND flash on Intel Moorestown, using the
57 Denali NAND controller core. 64 Denali NAND controller core.
58 65
66config MTD_NAND_DENALI_DT
67 tristate "Support Denali NAND controller as a DT device"
68 depends on HAVE_CLK && MTD_NAND_DENALI
69 help
70 Enable the driver for NAND flash on platforms using a Denali NAND
71 controller as a DT device.
72
59config MTD_NAND_DENALI_SCRATCH_REG_ADDR 73config MTD_NAND_DENALI_SCRATCH_REG_ADDR
60 hex "Denali NAND size scratch register address" 74 hex "Denali NAND size scratch register address"
61 default "0xFF108018" 75 default "0xFF108018"
62 depends on MTD_NAND_DENALI 76 depends on MTD_NAND_DENALI_PCI
63 help 77 help
64 Some platforms place the NAND chip size in a scratch register 78 Some platforms place the NAND chip size in a scratch register
65 because (some versions of) the driver aren't able to automatically 79 because (some versions of) the driver aren't able to automatically
@@ -433,6 +447,14 @@ config MTD_NAND_GPMI_NAND
433 block, such as SD card. So pay attention to it when you enable 447 block, such as SD card. So pay attention to it when you enable
434 the GPMI. 448 the GPMI.
435 449
450config MTD_NAND_BCM47XXNFLASH
451 tristate "Support for NAND flash on BCM4706 BCMA bus"
452 depends on BCMA_NFLASH
453 help
454 BCMA bus can have various flash memories attached, they are
455 registered by bcma as platform devices. This enables driver for
456 NAND flash memories. For now only BCM4706 is supported.
457
436config MTD_NAND_PLATFORM 458config MTD_NAND_PLATFORM
437 tristate "Support for generic platform NAND driver" 459 tristate "Support for generic platform NAND driver"
438 depends on HAS_IOMEM 460 depends on HAS_IOMEM
@@ -499,12 +521,6 @@ config MTD_NAND_MXC
499 This enables the driver for the NAND flash controller on the 521 This enables the driver for the NAND flash controller on the
500 MXC processors. 522 MXC processors.
501 523
502config MTD_NAND_NOMADIK
503 tristate "ST Nomadik 8815 NAND support"
504 depends on ARCH_NOMADIK
505 help
506 Driver for the NAND flash controller on the Nomadik, with ECC.
507
508config MTD_NAND_SH_FLCTL 524config MTD_NAND_SH_FLCTL
509 tristate "Support for NAND on Renesas SuperH FLCTL" 525 tristate "Support for NAND on Renesas SuperH FLCTL"
510 depends on SUPERH || ARCH_SHMOBILE 526 depends on SUPERH || ARCH_SHMOBILE
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 6c7f2b3ca8ae..d76d91205691 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
11obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o 11obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
12obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o 12obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
13obj-$(CONFIG_MTD_NAND_DENALI) += denali.o 13obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
14obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o
15obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
14obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o 16obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
15obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o 17obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
16obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o 18obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
@@ -45,11 +47,11 @@ obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
45obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o 47obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
46obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o 48obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
47obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o 49obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
48obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
49obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o 50obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
50obj-$(CONFIG_MTD_NAND_RICOH) += r852.o 51obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
51obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o 52obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
52obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/ 53obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
53obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o 54obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
55obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
54 56
55nand-objs := nand_base.o nand_bbt.o 57nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 9e7723aa7acc..f1d71cdc8aac 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -173,7 +173,7 @@ static const struct gpio _mandatory_gpio[] = {
173/* 173/*
174 * Main initialization routine 174 * Main initialization routine
175 */ 175 */
176static int __devinit ams_delta_init(struct platform_device *pdev) 176static int ams_delta_init(struct platform_device *pdev)
177{ 177{
178 struct nand_chip *this; 178 struct nand_chip *this;
179 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 179 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -270,7 +270,7 @@ out_free:
270/* 270/*
271 * Clean up routine 271 * Clean up routine
272 */ 272 */
273static int __devexit ams_delta_cleanup(struct platform_device *pdev) 273static int ams_delta_cleanup(struct platform_device *pdev)
274{ 274{
275 void __iomem *io_base = platform_get_drvdata(pdev); 275 void __iomem *io_base = platform_get_drvdata(pdev);
276 276
@@ -289,7 +289,7 @@ static int __devexit ams_delta_cleanup(struct platform_device *pdev)
289 289
290static struct platform_driver ams_delta_nand_driver = { 290static struct platform_driver ams_delta_nand_driver = {
291 .probe = ams_delta_init, 291 .probe = ams_delta_init,
292 .remove = __devexit_p(ams_delta_cleanup), 292 .remove = ams_delta_cleanup,
293 .driver = { 293 .driver = {
294 .name = "ams-delta-nand", 294 .name = "ams-delta-nand",
295 .owner = THIS_MODULE, 295 .owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 92623ac2015a..90bdca61c797 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -331,13 +331,13 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
331 * 12-bits 20-bytes 21-bytes 331 * 12-bits 20-bytes 21-bytes
332 * 24-bits 39-bytes 42-bytes 332 * 24-bits 39-bytes 42-bytes
333 */ 333 */
334static int __devinit pmecc_get_ecc_bytes(int cap, int sector_size) 334static int pmecc_get_ecc_bytes(int cap, int sector_size)
335{ 335{
336 int m = 12 + sector_size / 512; 336 int m = 12 + sector_size / 512;
337 return (m * cap + 7) / 8; 337 return (m * cap + 7) / 8;
338} 338}
339 339
340static void __devinit pmecc_config_ecc_layout(struct nand_ecclayout *layout, 340static void pmecc_config_ecc_layout(struct nand_ecclayout *layout,
341 int oobsize, int ecc_len) 341 int oobsize, int ecc_len)
342{ 342{
343 int i; 343 int i;
@@ -353,7 +353,7 @@ static void __devinit pmecc_config_ecc_layout(struct nand_ecclayout *layout,
353 oobsize - ecc_len - layout->oobfree[0].offset; 353 oobsize - ecc_len - layout->oobfree[0].offset;
354} 354}
355 355
356static void __devinit __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host) 356static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
357{ 357{
358 int table_size; 358 int table_size;
359 359
@@ -375,7 +375,7 @@ static void pmecc_data_free(struct atmel_nand_host *host)
375 kfree(host->pmecc_delta); 375 kfree(host->pmecc_delta);
376} 376}
377 377
378static int __devinit pmecc_data_alloc(struct atmel_nand_host *host) 378static int pmecc_data_alloc(struct atmel_nand_host *host)
379{ 379{
380 const int cap = host->pmecc_corr_cap; 380 const int cap = host->pmecc_corr_cap;
381 381
@@ -724,6 +724,7 @@ static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
724 struct atmel_nand_host *host = nand_chip->priv; 724 struct atmel_nand_host *host = nand_chip->priv;
725 int i, err_nbr, eccbytes; 725 int i, err_nbr, eccbytes;
726 uint8_t *buf_pos; 726 uint8_t *buf_pos;
727 int total_err = 0;
727 728
728 eccbytes = nand_chip->ecc.bytes; 729 eccbytes = nand_chip->ecc.bytes;
729 for (i = 0; i < eccbytes; i++) 730 for (i = 0; i < eccbytes; i++)
@@ -751,12 +752,13 @@ normal_check:
751 pmecc_correct_data(mtd, buf_pos, ecc, i, 752 pmecc_correct_data(mtd, buf_pos, ecc, i,
752 host->pmecc_bytes_per_sector, err_nbr); 753 host->pmecc_bytes_per_sector, err_nbr);
753 mtd->ecc_stats.corrected += err_nbr; 754 mtd->ecc_stats.corrected += err_nbr;
755 total_err += err_nbr;
754 } 756 }
755 } 757 }
756 pmecc_stat >>= 1; 758 pmecc_stat >>= 1;
757 } 759 }
758 760
759 return 0; 761 return total_err;
760} 762}
761 763
762static int atmel_nand_pmecc_read_page(struct mtd_info *mtd, 764static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
@@ -768,6 +770,7 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
768 uint32_t *eccpos = chip->ecc.layout->eccpos; 770 uint32_t *eccpos = chip->ecc.layout->eccpos;
769 uint32_t stat; 771 uint32_t stat;
770 unsigned long end_time; 772 unsigned long end_time;
773 int bitflips = 0;
771 774
772 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST); 775 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
773 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE); 776 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
@@ -790,11 +793,14 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
790 } 793 }
791 794
792 stat = pmecc_readl_relaxed(host->ecc, ISR); 795 stat = pmecc_readl_relaxed(host->ecc, ISR);
793 if (stat != 0) 796 if (stat != 0) {
794 if (pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]) != 0) 797 bitflips = pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]);
795 return -EIO; 798 if (bitflips < 0)
799 /* uncorrectable errors */
800 return 0;
801 }
796 802
797 return 0; 803 return bitflips;
798} 804}
799 805
800static int atmel_nand_pmecc_write_page(struct mtd_info *mtd, 806static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
@@ -1206,7 +1212,7 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
1206} 1212}
1207 1213
1208#if defined(CONFIG_OF) 1214#if defined(CONFIG_OF)
1209static int __devinit atmel_of_init_port(struct atmel_nand_host *host, 1215static int atmel_of_init_port(struct atmel_nand_host *host,
1210 struct device_node *np) 1216 struct device_node *np)
1211{ 1217{
1212 u32 val, table_offset; 1218 u32 val, table_offset;
@@ -1293,7 +1299,7 @@ static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
1293 return 0; 1299 return 0;
1294} 1300}
1295#else 1301#else
1296static int __devinit atmel_of_init_port(struct atmel_nand_host *host, 1302static int atmel_of_init_port(struct atmel_nand_host *host,
1297 struct device_node *np) 1303 struct device_node *np)
1298{ 1304{
1299 return -EINVAL; 1305 return -EINVAL;
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 5c47b200045a..217459d02b2f 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -382,7 +382,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
382 while(!this->dev_ready(mtd)); 382 while(!this->dev_ready(mtd));
383} 383}
384 384
385static int __devinit find_nand_cs(unsigned long nand_base) 385static int find_nand_cs(unsigned long nand_base)
386{ 386{
387 void __iomem *base = 387 void __iomem *base =
388 (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR); 388 (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
@@ -403,7 +403,7 @@ static int __devinit find_nand_cs(unsigned long nand_base)
403 return -ENODEV; 403 return -ENODEV;
404} 404}
405 405
406static int __devinit au1550nd_probe(struct platform_device *pdev) 406static int au1550nd_probe(struct platform_device *pdev)
407{ 407{
408 struct au1550nd_platdata *pd; 408 struct au1550nd_platdata *pd;
409 struct au1550nd_ctx *ctx; 409 struct au1550nd_ctx *ctx;
@@ -491,7 +491,7 @@ out1:
491 return ret; 491 return ret;
492} 492}
493 493
494static int __devexit au1550nd_remove(struct platform_device *pdev) 494static int au1550nd_remove(struct platform_device *pdev)
495{ 495{
496 struct au1550nd_ctx *ctx = platform_get_drvdata(pdev); 496 struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
497 struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 497 struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -509,7 +509,7 @@ static struct platform_driver au1550nd_driver = {
509 .owner = THIS_MODULE, 509 .owner = THIS_MODULE,
510 }, 510 },
511 .probe = au1550nd_probe, 511 .probe = au1550nd_probe,
512 .remove = __devexit_p(au1550nd_remove), 512 .remove = au1550nd_remove,
513}; 513};
514 514
515module_platform_driver(au1550nd_driver); 515module_platform_driver(au1550nd_driver);
diff --git a/drivers/mtd/nand/bcm47xxnflash/Makefile b/drivers/mtd/nand/bcm47xxnflash/Makefile
new file mode 100644
index 000000000000..f05b119e134b
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/Makefile
@@ -0,0 +1,4 @@
1bcm47xxnflash-y += main.o
2bcm47xxnflash-y += ops_bcm4706.o
3
4obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash.o
diff --git a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
new file mode 100644
index 000000000000..0bdb2ce4da75
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
@@ -0,0 +1,22 @@
1#ifndef __BCM47XXNFLASH_H
2#define __BCM47XXNFLASH_H
3
4#include <linux/mtd/mtd.h>
5#include <linux/mtd/nand.h>
6
7struct bcm47xxnflash {
8 struct bcma_drv_cc *cc;
9
10 struct nand_chip nand_chip;
11 struct mtd_info mtd;
12
13 unsigned curr_command;
14 int curr_page_addr;
15 int curr_column;
16
17 u8 id_data[8];
18};
19
20int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n);
21
22#endif /* BCM47XXNFLASH */
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
new file mode 100644
index 000000000000..2b8b05bec3dd
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -0,0 +1,108 @@
1/*
2 * BCM47XX NAND flash driver
3 *
4 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/platform_device.h>
16#include <linux/bcma/bcma.h>
17
18#include "bcm47xxnflash.h"
19
20MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
21MODULE_LICENSE("GPL");
22MODULE_AUTHOR("Rafał Miłecki");
23
24static const char *probes[] = { "bcm47xxpart", NULL };
25
26static int bcm47xxnflash_probe(struct platform_device *pdev)
27{
28 struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
29 struct bcm47xxnflash *b47n;
30 int err = 0;
31
32 b47n = kzalloc(sizeof(*b47n), GFP_KERNEL);
33 if (!b47n) {
34 err = -ENOMEM;
35 goto out;
36 }
37
38 b47n->nand_chip.priv = b47n;
39 b47n->mtd.owner = THIS_MODULE;
40 b47n->mtd.priv = &b47n->nand_chip; /* Required */
41 b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);
42
43 if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
44 err = bcm47xxnflash_ops_bcm4706_init(b47n);
45 } else {
46 pr_err("Device not supported\n");
47 err = -ENOTSUPP;
48 }
49 if (err) {
50 pr_err("Initialization failed: %d\n", err);
51 goto err_init;
52 }
53
54 err = mtd_device_parse_register(&b47n->mtd, probes, NULL, NULL, 0);
55 if (err) {
56 pr_err("Failed to register MTD device: %d\n", err);
57 goto err_dev_reg;
58 }
59
60 return 0;
61
62err_dev_reg:
63err_init:
64 kfree(b47n);
65out:
66 return err;
67}
68
69static int __devexit bcm47xxnflash_remove(struct platform_device *pdev)
70{
71 struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
72
73 if (nflash->mtd)
74 mtd_device_unregister(nflash->mtd);
75
76 return 0;
77}
78
79static struct platform_driver bcm47xxnflash_driver = {
80 .remove = __devexit_p(bcm47xxnflash_remove),
81 .driver = {
82 .name = "bcma_nflash",
83 .owner = THIS_MODULE,
84 },
85};
86
87static int __init bcm47xxnflash_init(void)
88{
89 int err;
90
91 /*
92 * Platform device "bcma_nflash" exists on SoCs and is registered very
93 * early, it won't be added during runtime (use platform_driver_probe).
94 */
95 err = platform_driver_probe(&bcm47xxnflash_driver, bcm47xxnflash_probe);
96 if (err)
97 pr_err("Failed to register serial flash driver: %d\n", err);
98
99 return err;
100}
101
102static void __exit bcm47xxnflash_exit(void)
103{
104 platform_driver_unregister(&bcm47xxnflash_driver);
105}
106
107module_init(bcm47xxnflash_init);
108module_exit(bcm47xxnflash_exit);
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
new file mode 100644
index 000000000000..86c9a79b89b3
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
@@ -0,0 +1,413 @@
1/*
2 * BCM47XX NAND flash driver
3 *
4 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/bcma/bcma.h>
16
17#include "bcm47xxnflash.h"
18
19/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has
20 * shown 164 retries as maxiumum. */
21#define NFLASH_READY_RETRIES 1000
22
23#define NFLASH_SECTOR_SIZE 512
24
25#define NCTL_CMD0 0x00010000
26#define NCTL_CMD1W 0x00080000
27#define NCTL_READ 0x00100000
28#define NCTL_WRITE 0x00200000
29#define NCTL_SPECADDR 0x01000000
30#define NCTL_READY 0x04000000
31#define NCTL_ERR 0x08000000
32#define NCTL_CSA 0x40000000
33#define NCTL_START 0x80000000
34
35/**************************************************
36 * Various helpers
37 **************************************************/
38
39static inline u8 bcm47xxnflash_ops_bcm4706_ns_to_cycle(u16 ns, u16 clock)
40{
41 return ((ns * 1000 * clock) / 1000000) + 1;
42}
43
44static int bcm47xxnflash_ops_bcm4706_ctl_cmd(struct bcma_drv_cc *cc, u32 code)
45{
46 int i = 0;
47
48 bcma_cc_write32(cc, BCMA_CC_NFLASH_CTL, NCTL_START | code);
49 for (i = 0; i < NFLASH_READY_RETRIES; i++) {
50 if (!(bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_START)) {
51 i = 0;
52 break;
53 }
54 }
55 if (i) {
56 pr_err("NFLASH control command not ready!\n");
57 return -EBUSY;
58 }
59 return 0;
60}
61
62static int bcm47xxnflash_ops_bcm4706_poll(struct bcma_drv_cc *cc)
63{
64 int i;
65
66 for (i = 0; i < NFLASH_READY_RETRIES; i++) {
67 if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_READY) {
68 if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) &
69 BCMA_CC_NFLASH_CTL_ERR) {
70 pr_err("Error on polling\n");
71 return -EBUSY;
72 } else {
73 return 0;
74 }
75 }
76 }
77
78 pr_err("Polling timeout!\n");
79 return -EBUSY;
80}
81
82/**************************************************
83 * R/W
84 **************************************************/
85
86static void bcm47xxnflash_ops_bcm4706_read(struct mtd_info *mtd, uint8_t *buf,
87 int len)
88{
89 struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
90 struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
91
92 u32 ctlcode;
93 u32 *dest = (u32 *)buf;
94 int i;
95 int toread;
96
97 BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
98 /* Don't validate column using nand_chip->page_shift, it may be bigger
99 * when accessing OOB */
100
101 while (len) {
102 /* We can read maximum of 0x200 bytes at once */
103 toread = min(len, 0x200);
104
105 /* Set page and column */
106 bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_COL_ADDR,
107 b47n->curr_column);
108 bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_ROW_ADDR,
109 b47n->curr_page_addr);
110
111 /* Prepare to read */
112 ctlcode = NCTL_CSA | NCTL_CMD1W | 0x00040000 | 0x00020000 |
113 NCTL_CMD0;
114 ctlcode |= NAND_CMD_READSTART << 8;
115 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode))
116 return;
117 if (bcm47xxnflash_ops_bcm4706_poll(b47n->cc))
118 return;
119
120 /* Eventually read some data :) */
121 for (i = 0; i < toread; i += 4, dest++) {
122 ctlcode = NCTL_CSA | 0x30000000 | NCTL_READ;
123 if (i == toread - 4) /* Last read goes without that */
124 ctlcode &= ~NCTL_CSA;
125 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
126 ctlcode))
127 return;
128 *dest = bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA);
129 }
130
131 b47n->curr_column += toread;
132 len -= toread;
133 }
134}
135
136static void bcm47xxnflash_ops_bcm4706_write(struct mtd_info *mtd,
137 const uint8_t *buf, int len)
138{
139 struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
140 struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
141 struct bcma_drv_cc *cc = b47n->cc;
142
143 u32 ctlcode;
144 const u32 *data = (u32 *)buf;
145 int i;
146
147 BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
148 /* Don't validate column using nand_chip->page_shift, it may be bigger
149 * when accessing OOB */
150
151 for (i = 0; i < len; i += 4, data++) {
152 bcma_cc_write32(cc, BCMA_CC_NFLASH_DATA, *data);
153
154 ctlcode = NCTL_CSA | 0x30000000 | NCTL_WRITE;
155 if (i == len - 4) /* Last read goes without that */
156 ctlcode &= ~NCTL_CSA;
157 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) {
158 pr_err("%s ctl_cmd didn't work!\n", __func__);
159 return;
160 }
161 }
162
163 b47n->curr_column += len;
164}
165
166/**************************************************
167 * NAND chip ops
168 **************************************************/
169
170/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
171static void bcm47xxnflash_ops_bcm4706_select_chip(struct mtd_info *mtd,
172 int chip)
173{
174 return;
175}
176
177/*
178 * Default nand_command and nand_command_lp don't match BCM4706 hardware layout.
179 * For example, reading chip id is performed in a non-standard way.
180 * Setting column and page is also handled differently, we use a special
181 * registers of ChipCommon core. Hacking cmd_ctrl to understand and convert
182 * standard commands would be much more complicated.
183 */
184static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct mtd_info *mtd,
185 unsigned command, int column,
186 int page_addr)
187{
188 struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
189 struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
190 struct bcma_drv_cc *cc = b47n->cc;
191 u32 ctlcode;
192 int i;
193
194 if (column != -1)
195 b47n->curr_column = column;
196 if (page_addr != -1)
197 b47n->curr_page_addr = page_addr;
198
199 switch (command) {
200 case NAND_CMD_RESET:
201 pr_warn("Chip reset not implemented yet\n");
202 break;
203 case NAND_CMD_READID:
204 ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0;
205 ctlcode |= NAND_CMD_READID;
206 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) {
207 pr_err("READID error\n");
208 break;
209 }
210
211 /*
212 * Reading is specific, last one has to go without NCTL_CSA
213 * bit. We don't know how many reads NAND subsystem is going
214 * to perform, so cache everything.
215 */
216 for (i = 0; i < ARRAY_SIZE(b47n->id_data); i++) {
217 ctlcode = NCTL_CSA | NCTL_READ;
218 if (i == ARRAY_SIZE(b47n->id_data) - 1)
219 ctlcode &= ~NCTL_CSA;
220 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
221 ctlcode)) {
222 pr_err("READID error\n");
223 break;
224 }
225 b47n->id_data[i] =
226 bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA)
227 & 0xFF;
228 }
229
230 break;
231 case NAND_CMD_STATUS:
232 ctlcode = NCTL_CSA | NCTL_CMD0 | NAND_CMD_STATUS;
233 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
234 pr_err("STATUS command error\n");
235 break;
236 case NAND_CMD_READ0:
237 break;
238 case NAND_CMD_READOOB:
239 if (page_addr != -1)
240 b47n->curr_column += mtd->writesize;
241 break;
242 case NAND_CMD_ERASE1:
243 bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
244 b47n->curr_page_addr);
245 ctlcode = 0x00040000 | NCTL_CMD1W | NCTL_CMD0 |
246 NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8);
247 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
248 pr_err("ERASE1 failed\n");
249 break;
250 case NAND_CMD_ERASE2:
251 break;
252 case NAND_CMD_SEQIN:
253 /* Set page and column */
254 bcma_cc_write32(cc, BCMA_CC_NFLASH_COL_ADDR,
255 b47n->curr_column);
256 bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
257 b47n->curr_page_addr);
258
259 /* Prepare to write */
260 ctlcode = 0x40000000 | 0x00040000 | 0x00020000 | 0x00010000;
261 ctlcode |= NAND_CMD_SEQIN;
262 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
263 pr_err("SEQIN failed\n");
264 break;
265 case NAND_CMD_PAGEPROG:
266 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, 0x00010000 |
267 NAND_CMD_PAGEPROG))
268 pr_err("PAGEPROG failed\n");
269 if (bcm47xxnflash_ops_bcm4706_poll(cc))
270 pr_err("PAGEPROG not ready\n");
271 break;
272 default:
273 pr_err("Command 0x%X unsupported\n", command);
274 break;
275 }
276 b47n->curr_command = command;
277}
278
279static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct mtd_info *mtd)
280{
281 struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
282 struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
283 struct bcma_drv_cc *cc = b47n->cc;
284 u32 tmp = 0;
285
286 switch (b47n->curr_command) {
287 case NAND_CMD_READID:
288 if (b47n->curr_column >= ARRAY_SIZE(b47n->id_data)) {
289 pr_err("Requested invalid id_data: %d\n",
290 b47n->curr_column);
291 return 0;
292 }
293 return b47n->id_data[b47n->curr_column++];
294 case NAND_CMD_STATUS:
295 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_READ))
296 return 0;
297 return bcma_cc_read32(cc, BCMA_CC_NFLASH_DATA) & 0xff;
298 case NAND_CMD_READOOB:
299 bcm47xxnflash_ops_bcm4706_read(mtd, (u8 *)&tmp, 4);
300 return tmp & 0xFF;
301 }
302
303 pr_err("Invalid command for byte read: 0x%X\n", b47n->curr_command);
304 return 0;
305}
306
307static void bcm47xxnflash_ops_bcm4706_read_buf(struct mtd_info *mtd,
308 uint8_t *buf, int len)
309{
310 struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
311 struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
312
313 switch (b47n->curr_command) {
314 case NAND_CMD_READ0:
315 case NAND_CMD_READOOB:
316 bcm47xxnflash_ops_bcm4706_read(mtd, buf, len);
317 return;
318 }
319
320 pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command);
321}
322
323static void bcm47xxnflash_ops_bcm4706_write_buf(struct mtd_info *mtd,
324 const uint8_t *buf, int len)
325{
326 struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
327 struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
328
329 switch (b47n->curr_command) {
330 case NAND_CMD_SEQIN:
331 bcm47xxnflash_ops_bcm4706_write(mtd, buf, len);
332 return;
333 }
334
335 pr_err("Invalid command for buf write: 0x%X\n", b47n->curr_command);
336}
337
338/**************************************************
339 * Init
340 **************************************************/
341
342int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
343{
344 int err;
345 u32 freq;
346 u16 clock;
347 u8 w0, w1, w2, w3, w4;
348
349 unsigned long chipsize; /* MiB */
350 u8 tbits, col_bits, col_size, row_bits, row_bsize;
351 u32 val;
352
353 b47n->nand_chip.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
354 b47n->nand_chip.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
355 b47n->nand_chip.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
356 b47n->nand_chip.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
357 b47n->nand_chip.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
358 b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
359 b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */
360
361 /* Enable NAND flash access */
362 bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
363 BCMA_CC_4706_FLASHSCFG_NF1);
364
365 /* Configure wait counters */
366 if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) {
367 freq = 100000000;
368 } else {
369 freq = bcma_chipco_pll_read(b47n->cc, 4);
370 freq = (freq * 0xFFF) >> 3;
371 freq = (freq * 25000000) >> 3;
372 }
373 clock = freq / 1000000;
374 w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock);
375 w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock);
376 w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
377 w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
378 w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock);
379 bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0,
380 (w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));
381
382 /* Scan NAND */
383 err = nand_scan(&b47n->mtd, 1);
384 if (err) {
385 pr_err("Could not scan NAND flash: %d\n", err);
386 goto exit;
387 }
388
389 /* Configure FLASH */
390 chipsize = b47n->nand_chip.chipsize >> 20;
391 tbits = ffs(chipsize); /* find first bit set */
392 if (!tbits || tbits != fls(chipsize)) {
393 pr_err("Invalid flash size: 0x%lX\n", chipsize);
394 err = -ENOTSUPP;
395 goto exit;
396 }
397 tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */
398
399 col_bits = b47n->nand_chip.page_shift + 1;
400 col_size = (col_bits + 7) / 8;
401
402 row_bits = tbits - col_bits + 1;
403 row_bsize = (row_bits + 7) / 8;
404
405 val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;
406 bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val);
407
408exit:
409 if (err)
410 bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
411 ~BCMA_CC_4706_FLASHSCFG_NF1);
412 return err;
413}
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index ab0caa74eb43..4271e948d1e2 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -658,7 +658,7 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
658/* 658/*
659 * Device management interface 659 * Device management interface
660 */ 660 */
661static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info) 661static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
662{ 662{
663 struct mtd_info *mtd = &info->mtd; 663 struct mtd_info *mtd = &info->mtd;
664 struct mtd_partition *parts = info->platform->partitions; 664 struct mtd_partition *parts = info->platform->partitions;
@@ -667,7 +667,7 @@ static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
667 return mtd_device_register(mtd, parts, nr); 667 return mtd_device_register(mtd, parts, nr);
668} 668}
669 669
670static int __devexit bf5xx_nand_remove(struct platform_device *pdev) 670static int bf5xx_nand_remove(struct platform_device *pdev)
671{ 671{
672 struct bf5xx_nand_info *info = to_nand_info(pdev); 672 struct bf5xx_nand_info *info = to_nand_info(pdev);
673 673
@@ -725,7 +725,7 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
725 * it can allocate all necessary resources then calls the 725 * it can allocate all necessary resources then calls the
726 * nand layer to look for devices 726 * nand layer to look for devices
727 */ 727 */
728static int __devinit bf5xx_nand_probe(struct platform_device *pdev) 728static int bf5xx_nand_probe(struct platform_device *pdev)
729{ 729{
730 struct bf5xx_nand_platform *plat = to_nand_plat(pdev); 730 struct bf5xx_nand_platform *plat = to_nand_plat(pdev);
731 struct bf5xx_nand_info *info = NULL; 731 struct bf5xx_nand_info *info = NULL;
@@ -865,7 +865,7 @@ static int bf5xx_nand_resume(struct platform_device *dev)
865/* driver device registration */ 865/* driver device registration */
866static struct platform_driver bf5xx_nand_driver = { 866static struct platform_driver bf5xx_nand_driver = {
867 .probe = bf5xx_nand_probe, 867 .probe = bf5xx_nand_probe,
868 .remove = __devexit_p(bf5xx_nand_remove), 868 .remove = bf5xx_nand_remove,
869 .suspend = bf5xx_nand_suspend, 869 .suspend = bf5xx_nand_suspend,
870 .resume = bf5xx_nand_resume, 870 .resume = bf5xx_nand_resume,
871 .driver = { 871 .driver = {
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 2bb7170502c2..010d61266536 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -585,7 +585,7 @@ static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
585} 585}
586 586
587/* F_2[X]/(X**6+X+1) */ 587/* F_2[X]/(X**6+X+1) */
588static unsigned short __devinit gf64_mul(u8 a, u8 b) 588static unsigned short gf64_mul(u8 a, u8 b)
589{ 589{
590 u8 c; 590 u8 c;
591 unsigned int i; 591 unsigned int i;
@@ -604,7 +604,7 @@ static unsigned short __devinit gf64_mul(u8 a, u8 b)
604} 604}
605 605
606/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */ 606/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */
607static u16 __devinit gf4096_mul(u16 a, u16 b) 607static u16 gf4096_mul(u16 a, u16 b)
608{ 608{
609 u8 ah, al, bh, bl, ch, cl; 609 u8 ah, al, bh, bl, ch, cl;
610 610
@@ -619,14 +619,14 @@ static u16 __devinit gf4096_mul(u16 a, u16 b)
619 return (ch << 6) ^ cl; 619 return (ch << 6) ^ cl;
620} 620}
621 621
622static int __devinit cafe_mul(int x) 622static int cafe_mul(int x)
623{ 623{
624 if (x == 0) 624 if (x == 0)
625 return 1; 625 return 1;
626 return gf4096_mul(x, 0xe01); 626 return gf4096_mul(x, 0xe01);
627} 627}
628 628
629static int __devinit cafe_nand_probe(struct pci_dev *pdev, 629static int cafe_nand_probe(struct pci_dev *pdev,
630 const struct pci_device_id *ent) 630 const struct pci_device_id *ent)
631{ 631{
632 struct mtd_info *mtd; 632 struct mtd_info *mtd;
@@ -821,7 +821,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
821 return err; 821 return err;
822} 822}
823 823
824static void __devexit cafe_nand_remove(struct pci_dev *pdev) 824static void cafe_nand_remove(struct pci_dev *pdev)
825{ 825{
826 struct mtd_info *mtd = pci_get_drvdata(pdev); 826 struct mtd_info *mtd = pci_get_drvdata(pdev);
827 struct cafe_priv *cafe = mtd->priv; 827 struct cafe_priv *cafe = mtd->priv;
@@ -887,7 +887,7 @@ static struct pci_driver cafe_nand_pci_driver = {
887 .name = "CAFÉ NAND", 887 .name = "CAFÉ NAND",
888 .id_table = cafe_nand_tbl, 888 .id_table = cafe_nand_tbl,
889 .probe = cafe_nand_probe, 889 .probe = cafe_nand_probe,
890 .remove = __devexit_p(cafe_nand_remove), 890 .remove = cafe_nand_remove,
891 .resume = cafe_nand_resume, 891 .resume = cafe_nand_resume,
892}; 892};
893 893
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index adb6c3ef37fb..2cdeab8bebc4 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -237,6 +237,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
237 this->ecc.hwctl = cs_enable_hwecc; 237 this->ecc.hwctl = cs_enable_hwecc;
238 this->ecc.calculate = cs_calculate_ecc; 238 this->ecc.calculate = cs_calculate_ecc;
239 this->ecc.correct = nand_correct_data; 239 this->ecc.correct = nand_correct_data;
240 this->ecc.strength = 1;
240 241
241 /* Enable the following for a flash based bad block table */ 242 /* Enable the following for a flash based bad block table */
242 this->bbt_options = NAND_BBT_USE_FLASH; 243 this->bbt_options = NAND_BBT_USE_FLASH;
@@ -247,8 +248,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
247 goto out_ior; 248 goto out_ior;
248 } 249 }
249 250
250 this->ecc.strength = 1;
251
252 new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs); 251 new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
253 252
254 cs553x_mtd[cs] = new_mtd; 253 cs553x_mtd[cs] = new_mtd;
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 945047ad0952..3502606f6480 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -821,9 +821,16 @@ syndrome_done:
821 if (ret < 0) 821 if (ret < 0)
822 goto err_scan; 822 goto err_scan;
823 823
824 ret = mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts, 824 if (pdata->parts)
825 pdata->nr_parts); 825 ret = mtd_device_parse_register(&info->mtd, NULL, NULL,
826 826 pdata->parts, pdata->nr_parts);
827 else {
828 struct mtd_part_parser_data ppdata;
829
830 ppdata.of_node = pdev->dev.of_node;
831 ret = mtd_device_parse_register(&info->mtd, NULL, &ppdata,
832 NULL, 0);
833 }
827 if (ret < 0) 834 if (ret < 0)
828 goto err_scan; 835 goto err_scan;
829 836
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index e706a237170f..0c8bb6bf8424 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -16,14 +16,12 @@
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 * 17 *
18 */ 18 */
19
20#include <linux/interrupt.h> 19#include <linux/interrupt.h>
21#include <linux/delay.h> 20#include <linux/delay.h>
22#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
23#include <linux/wait.h> 22#include <linux/wait.h>
24#include <linux/mutex.h> 23#include <linux/mutex.h>
25#include <linux/slab.h> 24#include <linux/slab.h>
26#include <linux/pci.h>
27#include <linux/mtd/mtd.h> 25#include <linux/mtd/mtd.h>
28#include <linux/module.h> 26#include <linux/module.h>
29 27
@@ -89,13 +87,6 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
89 * format the bank into the proper bits for the controller */ 87 * format the bank into the proper bits for the controller */
90#define BANK(x) ((x) << 24) 88#define BANK(x) ((x) << 24)
91 89
92/* List of platforms this NAND controller has be integrated into */
93static const struct pci_device_id denali_pci_ids[] = {
94 { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
95 { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
96 { /* end: all zeroes */ }
97};
98
99/* forward declarations */ 90/* forward declarations */
100static void clear_interrupts(struct denali_nand_info *denali); 91static void clear_interrupts(struct denali_nand_info *denali);
101static uint32_t wait_for_irq(struct denali_nand_info *denali, 92static uint32_t wait_for_irq(struct denali_nand_info *denali,
@@ -699,7 +690,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
699 690
700 if (comp_res == 0) { 691 if (comp_res == 0) {
701 /* timeout */ 692 /* timeout */
702 printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n", 693 pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
703 intr_status, irq_mask); 694 intr_status, irq_mask);
704 695
705 intr_status = 0; 696 intr_status = 0;
@@ -1305,8 +1296,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
1305 /* TODO: Read OOB data */ 1296 /* TODO: Read OOB data */
1306 break; 1297 break;
1307 default: 1298 default:
1308 printk(KERN_ERR ": unsupported command" 1299 pr_err(": unsupported command received 0x%x\n", cmd);
1309 " received 0x%x\n", cmd);
1310 break; 1300 break;
1311 } 1301 }
1312} 1302}
@@ -1425,107 +1415,48 @@ void denali_drv_init(struct denali_nand_info *denali)
1425 denali->irq_status = 0; 1415 denali->irq_status = 0;
1426} 1416}
1427 1417
1428/* driver entry point */ 1418int denali_init(struct denali_nand_info *denali)
1429static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1430{ 1419{
1431 int ret = -ENODEV; 1420 int ret;
1432 resource_size_t csr_base, mem_base;
1433 unsigned long csr_len, mem_len;
1434 struct denali_nand_info *denali;
1435
1436 denali = kzalloc(sizeof(*denali), GFP_KERNEL);
1437 if (!denali)
1438 return -ENOMEM;
1439 1421
1440 ret = pci_enable_device(dev); 1422 if (denali->platform == INTEL_CE4100) {
1441 if (ret) {
1442 printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
1443 goto failed_alloc_memery;
1444 }
1445
1446 if (id->driver_data == INTEL_CE4100) {
1447 /* Due to a silicon limitation, we can only support 1423 /* Due to a silicon limitation, we can only support
1448 * ONFI timing mode 1 and below. 1424 * ONFI timing mode 1 and below.
1449 */ 1425 */
1450 if (onfi_timing_mode < -1 || onfi_timing_mode > 1) { 1426 if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
1451 printk(KERN_ERR "Intel CE4100 only supports" 1427 pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n");
1452 " ONFI timing mode 1 or below\n"); 1428 return -EINVAL;
1453 ret = -EINVAL;
1454 goto failed_enable_dev;
1455 }
1456 denali->platform = INTEL_CE4100;
1457 mem_base = pci_resource_start(dev, 0);
1458 mem_len = pci_resource_len(dev, 1);
1459 csr_base = pci_resource_start(dev, 1);
1460 csr_len = pci_resource_len(dev, 1);
1461 } else {
1462 denali->platform = INTEL_MRST;
1463 csr_base = pci_resource_start(dev, 0);
1464 csr_len = pci_resource_len(dev, 0);
1465 mem_base = pci_resource_start(dev, 1);
1466 mem_len = pci_resource_len(dev, 1);
1467 if (!mem_len) {
1468 mem_base = csr_base + csr_len;
1469 mem_len = csr_len;
1470 } 1429 }
1471 } 1430 }
1472 1431
1473 /* Is 32-bit DMA supported? */ 1432 /* Is 32-bit DMA supported? */
1474 ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); 1433 ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
1475 if (ret) { 1434 if (ret) {
1476 printk(KERN_ERR "Spectra: no usable DMA configuration\n"); 1435 pr_err("Spectra: no usable DMA configuration\n");
1477 goto failed_enable_dev; 1436 return ret;
1478 } 1437 }
1479 denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf, 1438 denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
1480 DENALI_BUF_SIZE, 1439 DENALI_BUF_SIZE,
1481 DMA_BIDIRECTIONAL); 1440 DMA_BIDIRECTIONAL);
1482 1441
1483 if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) { 1442 if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
1484 dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n"); 1443 dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
1485 goto failed_enable_dev; 1444 return -EIO;
1486 }
1487
1488 pci_set_master(dev);
1489 denali->dev = &dev->dev;
1490 denali->mtd.dev.parent = &dev->dev;
1491
1492 ret = pci_request_regions(dev, DENALI_NAND_NAME);
1493 if (ret) {
1494 printk(KERN_ERR "Spectra: Unable to request memory regions\n");
1495 goto failed_dma_map;
1496 }
1497
1498 denali->flash_reg = ioremap_nocache(csr_base, csr_len);
1499 if (!denali->flash_reg) {
1500 printk(KERN_ERR "Spectra: Unable to remap memory region\n");
1501 ret = -ENOMEM;
1502 goto failed_req_regions;
1503 }
1504
1505 denali->flash_mem = ioremap_nocache(mem_base, mem_len);
1506 if (!denali->flash_mem) {
1507 printk(KERN_ERR "Spectra: ioremap_nocache failed!");
1508 ret = -ENOMEM;
1509 goto failed_remap_reg;
1510 } 1445 }
1511 1446 denali->mtd.dev.parent = denali->dev;
1512 denali_hw_init(denali); 1447 denali_hw_init(denali);
1513 denali_drv_init(denali); 1448 denali_drv_init(denali);
1514 1449
1515 /* denali_isr register is done after all the hardware 1450 /* denali_isr register is done after all the hardware
1516 * initilization is finished*/ 1451 * initilization is finished*/
1517 if (request_irq(dev->irq, denali_isr, IRQF_SHARED, 1452 if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
1518 DENALI_NAND_NAME, denali)) { 1453 DENALI_NAND_NAME, denali)) {
1519 printk(KERN_ERR "Spectra: Unable to allocate IRQ\n"); 1454 pr_err("Spectra: Unable to allocate IRQ\n");
1520 ret = -ENODEV; 1455 return -ENODEV;
1521 goto failed_remap_mem;
1522 } 1456 }
1523 1457
1524 /* now that our ISR is registered, we can enable interrupts */ 1458 /* now that our ISR is registered, we can enable interrupts */
1525 denali_set_intr_modes(denali, true); 1459 denali_set_intr_modes(denali, true);
1526
1527 pci_set_drvdata(dev, denali);
1528
1529 denali->mtd.name = "denali-nand"; 1460 denali->mtd.name = "denali-nand";
1530 denali->mtd.owner = THIS_MODULE; 1461 denali->mtd.owner = THIS_MODULE;
1531 denali->mtd.priv = &denali->nand; 1462 denali->mtd.priv = &denali->nand;
@@ -1549,8 +1480,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1549 */ 1480 */
1550 if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) { 1481 if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
1551 ret = -ENODEV; 1482 ret = -ENODEV;
1552 printk(KERN_ERR "Spectra: device size not supported by this " 1483 pr_err("Spectra: device size not supported by this version of MTD.");
1553 "version of MTD.");
1554 goto failed_req_irq; 1484 goto failed_req_irq;
1555 } 1485 }
1556 1486
@@ -1602,8 +1532,8 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1602 } else if (denali->mtd.oobsize < (denali->bbtskipbytes + 1532 } else if (denali->mtd.oobsize < (denali->bbtskipbytes +
1603 ECC_8BITS * (denali->mtd.writesize / 1533 ECC_8BITS * (denali->mtd.writesize /
1604 ECC_SECTOR_SIZE))) { 1534 ECC_SECTOR_SIZE))) {
1605 printk(KERN_ERR "Your NAND chip OOB is not large enough to" 1535 pr_err("Your NAND chip OOB is not large enough to \
1606 " contain 8bit ECC correction codes"); 1536 contain 8bit ECC correction codes");
1607 goto failed_req_irq; 1537 goto failed_req_irq;
1608 } else { 1538 } else {
1609 denali->nand.ecc.strength = 8; 1539 denali->nand.ecc.strength = 8;
@@ -1655,56 +1585,24 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1655 1585
1656 ret = mtd_device_register(&denali->mtd, NULL, 0); 1586 ret = mtd_device_register(&denali->mtd, NULL, 0);
1657 if (ret) { 1587 if (ret) {
1658 dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n", 1588 dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n",
1659 ret); 1589 ret);
1660 goto failed_req_irq; 1590 goto failed_req_irq;
1661 } 1591 }
1662 return 0; 1592 return 0;
1663 1593
1664failed_req_irq: 1594failed_req_irq:
1665 denali_irq_cleanup(dev->irq, denali); 1595 denali_irq_cleanup(denali->irq, denali);
1666failed_remap_mem: 1596
1667 iounmap(denali->flash_mem);
1668failed_remap_reg:
1669 iounmap(denali->flash_reg);
1670failed_req_regions:
1671 pci_release_regions(dev);
1672failed_dma_map:
1673 dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
1674 DMA_BIDIRECTIONAL);
1675failed_enable_dev:
1676 pci_disable_device(dev);
1677failed_alloc_memery:
1678 kfree(denali);
1679 return ret; 1597 return ret;
1680} 1598}
1599EXPORT_SYMBOL(denali_init);
1681 1600
1682/* driver exit point */ 1601/* driver exit point */
1683static void denali_pci_remove(struct pci_dev *dev) 1602void denali_remove(struct denali_nand_info *denali)
1684{ 1603{
1685 struct denali_nand_info *denali = pci_get_drvdata(dev); 1604 denali_irq_cleanup(denali->irq, denali);
1686 1605 dma_unmap_single(denali->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
1687 nand_release(&denali->mtd); 1606 DMA_BIDIRECTIONAL);
1688
1689 denali_irq_cleanup(dev->irq, denali);
1690
1691 iounmap(denali->flash_reg);
1692 iounmap(denali->flash_mem);
1693 pci_release_regions(dev);
1694 pci_disable_device(dev);
1695 dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
1696 DMA_BIDIRECTIONAL);
1697 pci_set_drvdata(dev, NULL);
1698 kfree(denali);
1699} 1607}
1700 1608EXPORT_SYMBOL(denali_remove);
1701MODULE_DEVICE_TABLE(pci, denali_pci_ids);
1702
1703static struct pci_driver denali_pci_driver = {
1704 .name = DENALI_NAND_NAME,
1705 .id_table = denali_pci_ids,
1706 .probe = denali_pci_probe,
1707 .remove = denali_pci_remove,
1708};
1709
1710module_pci_driver(denali_pci_driver);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index fabb9d56b39e..cec5712862c9 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -466,6 +466,7 @@ struct nand_buf {
466 466
467#define INTEL_CE4100 1 467#define INTEL_CE4100 1
468#define INTEL_MRST 2 468#define INTEL_MRST 2
469#define DT 3
469 470
470struct denali_nand_info { 471struct denali_nand_info {
471 struct mtd_info mtd; 472 struct mtd_info mtd;
@@ -487,6 +488,7 @@ struct denali_nand_info {
487 uint32_t irq_status; 488 uint32_t irq_status;
488 int irq_debug_array[32]; 489 int irq_debug_array[32];
489 int idx; 490 int idx;
491 int irq;
490 492
491 uint32_t devnum; /* represent how many nands connected */ 493 uint32_t devnum; /* represent how many nands connected */
492 uint32_t fwblks; /* represent how many blocks FW used */ 494 uint32_t fwblks; /* represent how many blocks FW used */
@@ -496,4 +498,7 @@ struct denali_nand_info {
496 uint32_t max_banks; 498 uint32_t max_banks;
497}; 499};
498 500
501extern int denali_init(struct denali_nand_info *denali);
502extern void denali_remove(struct denali_nand_info *denali);
503
499#endif /*_LLD_NAND_*/ 504#endif /*_LLD_NAND_*/
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
new file mode 100644
index 000000000000..546f8cb5688d
--- /dev/null
+++ b/drivers/mtd/nand/denali_dt.c
@@ -0,0 +1,167 @@
1/*
2 * NAND Flash Controller Device Driver for DT
3 *
4 * Copyright © 2011, Picochip.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15#include <linux/clk.h>
16#include <linux/err.h>
17#include <linux/io.h>
18#include <linux/ioport.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/platform_device.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/slab.h>
25
26#include "denali.h"
27
28struct denali_dt {
29 struct denali_nand_info denali;
30 struct clk *clk;
31};
32
33static void __iomem *request_and_map(struct device *dev,
34 const struct resource *res)
35{
36 void __iomem *ptr;
37
38 if (!devm_request_mem_region(dev, res->start, resource_size(res),
39 "denali-dt")) {
40 dev_err(dev, "unable to request %s\n", res->name);
41 return NULL;
42 }
43
44 ptr = devm_ioremap_nocache(dev, res->start, resource_size(res));
45 if (!res)
46 dev_err(dev, "ioremap_nocache of %s failed!", res->name);
47
48 return ptr;
49}
50
51static const struct of_device_id denali_nand_dt_ids[] = {
52 { .compatible = "denali,denali-nand-dt" },
53 { /* sentinel */ }
54 };
55
56MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
57
58static u64 denali_dma_mask;
59
60static int denali_dt_probe(struct platform_device *ofdev)
61{
62 struct resource *denali_reg, *nand_data;
63 struct denali_dt *dt;
64 struct denali_nand_info *denali;
65 int ret;
66 const struct of_device_id *of_id;
67
68 of_id = of_match_device(denali_nand_dt_ids, &ofdev->dev);
69 if (of_id) {
70 ofdev->id_entry = of_id->data;
71 } else {
72 pr_err("Failed to find the right device id.\n");
73 return -ENOMEM;
74 }
75
76 dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
77 if (!dt)
78 return -ENOMEM;
79 denali = &dt->denali;
80
81 denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg");
82 nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data");
83 if (!denali_reg || !nand_data) {
84 dev_err(&ofdev->dev, "resources not completely defined\n");
85 return -EINVAL;
86 }
87
88 denali->platform = DT;
89 denali->dev = &ofdev->dev;
90 denali->irq = platform_get_irq(ofdev, 0);
91 if (denali->irq < 0) {
92 dev_err(&ofdev->dev, "no irq defined\n");
93 return -ENXIO;
94 }
95
96 denali->flash_reg = request_and_map(&ofdev->dev, denali_reg);
97 if (!denali->flash_reg)
98 return -ENOMEM;
99
100 denali->flash_mem = request_and_map(&ofdev->dev, nand_data);
101 if (!denali->flash_mem)
102 return -ENOMEM;
103
104 if (!of_property_read_u32(ofdev->dev.of_node,
105 "dma-mask", (u32 *)&denali_dma_mask)) {
106 denali->dev->dma_mask = &denali_dma_mask;
107 } else {
108 denali->dev->dma_mask = NULL;
109 }
110
111 dt->clk = clk_get(&ofdev->dev, NULL);
112 if (IS_ERR(dt->clk)) {
113 dev_err(&ofdev->dev, "no clk available\n");
114 return PTR_ERR(dt->clk);
115 }
116 clk_prepare_enable(dt->clk);
117
118 ret = denali_init(denali);
119 if (ret)
120 goto out_disable_clk;
121
122 platform_set_drvdata(ofdev, dt);
123 return 0;
124
125out_disable_clk:
126 clk_disable_unprepare(dt->clk);
127 clk_put(dt->clk);
128
129 return ret;
130}
131
132static int denali_dt_remove(struct platform_device *ofdev)
133{
134 struct denali_dt *dt = platform_get_drvdata(ofdev);
135
136 denali_remove(&dt->denali);
137 clk_disable(dt->clk);
138 clk_put(dt->clk);
139
140 return 0;
141}
142
143static struct platform_driver denali_dt_driver = {
144 .probe = denali_dt_probe,
145 .remove = denali_dt_remove,
146 .driver = {
147 .name = "denali-nand-dt",
148 .owner = THIS_MODULE,
149 .of_match_table = of_match_ptr(denali_nand_dt_ids),
150 },
151};
152
153static int __init denali_init_dt(void)
154{
155 return platform_driver_register(&denali_dt_driver);
156}
157module_init(denali_init_dt);
158
159static void __exit denali_exit_dt(void)
160{
161 platform_driver_unregister(&denali_dt_driver);
162}
163module_exit(denali_exit_dt);
164
165MODULE_LICENSE("GPL");
166MODULE_AUTHOR("Jamie Iles");
167MODULE_DESCRIPTION("DT driver for Denali NAND controller");
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
new file mode 100644
index 000000000000..e3e46623b2b4
--- /dev/null
+++ b/drivers/mtd/nand/denali_pci.c
@@ -0,0 +1,144 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright © 2009-2010, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/pci.h>
17#include <linux/slab.h>
18
19#include "denali.h"
20
21#define DENALI_NAND_NAME "denali-nand-pci"
22
23/* List of platforms this NAND controller has be integrated into */
24static DEFINE_PCI_DEVICE_TABLE(denali_pci_ids) = {
25 { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
26 { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
27 { /* end: all zeroes */ }
28};
29MODULE_DEVICE_TABLE(pci, denali_pci_ids);
30
31static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
32{
33 int ret = -ENODEV;
34 resource_size_t csr_base, mem_base;
35 unsigned long csr_len, mem_len;
36 struct denali_nand_info *denali;
37
38 denali = kzalloc(sizeof(*denali), GFP_KERNEL);
39 if (!denali)
40 return -ENOMEM;
41
42 ret = pci_enable_device(dev);
43 if (ret) {
44 pr_err("Spectra: pci_enable_device failed.\n");
45 goto failed_alloc_memery;
46 }
47
48 if (id->driver_data == INTEL_CE4100) {
49 denali->platform = INTEL_CE4100;
50 mem_base = pci_resource_start(dev, 0);
51 mem_len = pci_resource_len(dev, 1);
52 csr_base = pci_resource_start(dev, 1);
53 csr_len = pci_resource_len(dev, 1);
54 } else {
55 denali->platform = INTEL_MRST;
56 csr_base = pci_resource_start(dev, 0);
57 csr_len = pci_resource_len(dev, 0);
58 mem_base = pci_resource_start(dev, 1);
59 mem_len = pci_resource_len(dev, 1);
60 if (!mem_len) {
61 mem_base = csr_base + csr_len;
62 mem_len = csr_len;
63 }
64 }
65
66 pci_set_master(dev);
67 denali->dev = &dev->dev;
68 denali->irq = dev->irq;
69
70 ret = pci_request_regions(dev, DENALI_NAND_NAME);
71 if (ret) {
72 pr_err("Spectra: Unable to request memory regions\n");
73 goto failed_enable_dev;
74 }
75
76 denali->flash_reg = ioremap_nocache(csr_base, csr_len);
77 if (!denali->flash_reg) {
78 pr_err("Spectra: Unable to remap memory region\n");
79 ret = -ENOMEM;
80 goto failed_req_regions;
81 }
82
83 denali->flash_mem = ioremap_nocache(mem_base, mem_len);
84 if (!denali->flash_mem) {
85 pr_err("Spectra: ioremap_nocache failed!");
86 ret = -ENOMEM;
87 goto failed_remap_reg;
88 }
89
90 ret = denali_init(denali);
91 if (ret)
92 goto failed_remap_mem;
93
94 pci_set_drvdata(dev, denali);
95
96 return 0;
97
98failed_remap_mem:
99 iounmap(denali->flash_mem);
100failed_remap_reg:
101 iounmap(denali->flash_reg);
102failed_req_regions:
103 pci_release_regions(dev);
104failed_enable_dev:
105 pci_disable_device(dev);
106failed_alloc_memery:
107 kfree(denali);
108
109 return ret;
110}
111
112/* driver exit point */
113static void denali_pci_remove(struct pci_dev *dev)
114{
115 struct denali_nand_info *denali = pci_get_drvdata(dev);
116
117 denali_remove(denali);
118 iounmap(denali->flash_reg);
119 iounmap(denali->flash_mem);
120 pci_release_regions(dev);
121 pci_disable_device(dev);
122 pci_set_drvdata(dev, NULL);
123 kfree(denali);
124}
125
126static struct pci_driver denali_pci_driver = {
127 .name = DENALI_NAND_NAME,
128 .id_table = denali_pci_ids,
129 .probe = denali_pci_probe,
130 .remove = denali_pci_remove,
131};
132
133static int denali_init_pci(void)
134{
135 pr_info("Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
136 return pci_register_driver(&denali_pci_driver);
137}
138module_init(denali_init_pci);
139
140static void denali_exit_pci(void)
141{
142 pci_unregister_driver(&denali_pci_driver);
143}
144module_exit(denali_exit_pci);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 256eb30f6180..81fa5784f98b 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -53,8 +53,6 @@ static unsigned long __initdata doc_locations[] = {
53 0xe0000, 0xe2000, 0xe4000, 0xe6000, 53 0xe0000, 0xe2000, 0xe4000, 0xe6000,
54 0xe8000, 0xea000, 0xec000, 0xee000, 54 0xe8000, 0xea000, 0xec000, 0xee000,
55#endif /* CONFIG_MTD_DOCPROBE_HIGH */ 55#endif /* CONFIG_MTD_DOCPROBE_HIGH */
56#else
57#warning Unknown architecture for DiskOnChip. No default probe locations defined
58#endif 56#endif
59 0xffffffff }; 57 0xffffffff };
60 58
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 799da5d1c857..18fa4489e52e 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -46,6 +46,25 @@
46#include <linux/bitrev.h> 46#include <linux/bitrev.h>
47 47
48/* 48/*
49 * In "reliable mode" consecutive 2k pages are used in parallel (in some
50 * fashion) to store the same data. The data can be read back from the
51 * even-numbered pages in the normal manner; odd-numbered pages will appear to
52 * contain junk. Systems that boot from the docg4 typically write the secondary
53 * program loader (SPL) code in this mode. The SPL is loaded by the initial
54 * program loader (IPL, stored in the docg4's 2k NOR-like region that is mapped
55 * to the reset vector address). This module parameter enables you to use this
56 * driver to write the SPL. When in this mode, no more than 2k of data can be
57 * written at a time, because the addresses do not increment in the normal
58 * manner, and the starting offset must be within an even-numbered 2k region;
59 * i.e., invalid starting offsets are 0x800, 0xa00, 0xc00, 0xe00, 0x1800,
60 * 0x1a00, ... Reliable mode is a special case and should not be used unless
61 * you know what you're doing.
62 */
63static bool reliable_mode;
64module_param(reliable_mode, bool, 0);
65MODULE_PARM_DESC(reliable_mode, "pages are programmed in reliable mode");
66
67/*
49 * You'll want to ignore badblocks if you're reading a partition that contains 68 * You'll want to ignore badblocks if you're reading a partition that contains
50 * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since 69 * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
51 * it does not use mtd nand's method for marking bad blocks (using oob area). 70 * it does not use mtd nand's method for marking bad blocks (using oob area).
@@ -113,6 +132,7 @@ struct docg4_priv {
113#define DOCG4_SEQ_PAGEWRITE 0x16 132#define DOCG4_SEQ_PAGEWRITE 0x16
114#define DOCG4_SEQ_PAGEPROG 0x1e 133#define DOCG4_SEQ_PAGEPROG 0x1e
115#define DOCG4_SEQ_BLOCKERASE 0x24 134#define DOCG4_SEQ_BLOCKERASE 0x24
135#define DOCG4_SEQ_SETMODE 0x45
116 136
117/* DOC_FLASHCOMMAND register commands */ 137/* DOC_FLASHCOMMAND register commands */
118#define DOCG4_CMD_PAGE_READ 0x00 138#define DOCG4_CMD_PAGE_READ 0x00
@@ -122,6 +142,8 @@ struct docg4_priv {
122#define DOC_CMD_PROG_BLOCK_ADDR 0x60 142#define DOC_CMD_PROG_BLOCK_ADDR 0x60
123#define DOCG4_CMD_PAGEWRITE 0x80 143#define DOCG4_CMD_PAGEWRITE 0x80
124#define DOC_CMD_PROG_CYCLE2 0x10 144#define DOC_CMD_PROG_CYCLE2 0x10
145#define DOCG4_CMD_FAST_MODE 0xa3 /* functionality guessed */
146#define DOC_CMD_RELIABLE_MODE 0x22
125#define DOC_CMD_RESET 0xff 147#define DOC_CMD_RESET 0xff
126 148
127/* DOC_POWERMODE register bits */ 149/* DOC_POWERMODE register bits */
@@ -190,17 +212,20 @@ struct docg4_priv {
190#define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */ 212#define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */
191 213
192#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */ 214#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
215#define DOCG4_REDUNDANT_BBT_PAGE 24 /* page where redundant factory bbt lives */
193 216
194/* 217/*
195 * Oob bytes 0 - 6 are available to the user. 218 * Bytes 0, 1 are used as badblock marker.
196 * Byte 7 is hamming ecc for first 7 bytes. Bytes 8 - 14 are hw-generated ecc. 219 * Bytes 2 - 6 are available to the user.
220 * Byte 7 is hamming ecc for first 7 oob bytes only.
221 * Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14.
197 * Byte 15 (the last) is used by the driver as a "page written" flag. 222 * Byte 15 (the last) is used by the driver as a "page written" flag.
198 */ 223 */
199static struct nand_ecclayout docg4_oobinfo = { 224static struct nand_ecclayout docg4_oobinfo = {
200 .eccbytes = 9, 225 .eccbytes = 9,
201 .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15}, 226 .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
202 .oobavail = 7, 227 .oobavail = 5,
203 .oobfree = { {0, 7} } 228 .oobfree = { {.offset = 2, .length = 5} }
204}; 229};
205 230
206/* 231/*
@@ -611,6 +636,14 @@ static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
611 dev_dbg(doc->dev, 636 dev_dbg(doc->dev,
612 "docg4: %s: g4 addr: %x\n", __func__, docg4_addr); 637 "docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
613 sequence_reset(mtd); 638 sequence_reset(mtd);
639
640 if (unlikely(reliable_mode)) {
641 writew(DOCG4_SEQ_SETMODE, docptr + DOC_FLASHSEQUENCE);
642 writew(DOCG4_CMD_FAST_MODE, docptr + DOC_FLASHCOMMAND);
643 writew(DOC_CMD_RELIABLE_MODE, docptr + DOC_FLASHCOMMAND);
644 write_nop(docptr);
645 }
646
614 writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE); 647 writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
615 writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND); 648 writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
616 write_nop(docptr); 649 write_nop(docptr);
@@ -691,6 +724,15 @@ static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
691 break; 724 break;
692 725
693 case NAND_CMD_SEQIN: 726 case NAND_CMD_SEQIN:
727 if (unlikely(reliable_mode)) {
728 uint16_t g4_page = g4_addr >> 16;
729
730 /* writes to odd-numbered 2k pages are invalid */
731 if (g4_page & 0x01)
732 dev_warn(doc->dev,
733 "invalid reliable mode address\n");
734 }
735
694 write_page_prologue(mtd, g4_addr); 736 write_page_prologue(mtd, g4_addr);
695 737
696 /* hack for deferred write of oob bytes */ 738 /* hack for deferred write of oob bytes */
@@ -979,16 +1021,15 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
979 struct docg4_priv *doc = nand->priv; 1021 struct docg4_priv *doc = nand->priv;
980 uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0); 1022 uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
981 uint8_t *buf; 1023 uint8_t *buf;
982 int i, block, status; 1024 int i, block;
1025 __u32 eccfailed_stats = mtd->ecc_stats.failed;
983 1026
984 buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL); 1027 buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
985 if (buf == NULL) 1028 if (buf == NULL)
986 return -ENOMEM; 1029 return -ENOMEM;
987 1030
988 read_page_prologue(mtd, g4_addr); 1031 read_page_prologue(mtd, g4_addr);
989 status = docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE); 1032 docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
990 if (status)
991 goto exit;
992 1033
993 /* 1034 /*
994 * If no memory-based bbt was created, exit. This will happen if module 1035 * If no memory-based bbt was created, exit. This will happen if module
@@ -1000,6 +1041,20 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
1000 if (nand->bbt == NULL) /* no memory-based bbt */ 1041 if (nand->bbt == NULL) /* no memory-based bbt */
1001 goto exit; 1042 goto exit;
1002 1043
1044 if (mtd->ecc_stats.failed > eccfailed_stats) {
1045 /*
1046 * Whoops, an ecc failure ocurred reading the factory bbt.
1047 * It is stored redundantly, so we get another chance.
1048 */
1049 eccfailed_stats = mtd->ecc_stats.failed;
1050 docg4_read_page(mtd, nand, buf, 0, DOCG4_REDUNDANT_BBT_PAGE);
1051 if (mtd->ecc_stats.failed > eccfailed_stats) {
1052 dev_warn(doc->dev,
1053 "The factory bbt could not be read!\n");
1054 goto exit;
1055 }
1056 }
1057
1003 /* 1058 /*
1004 * Parse factory bbt and update memory-based bbt. Factory bbt format is 1059 * Parse factory bbt and update memory-based bbt. Factory bbt format is
1005 * simple: one bit per block, block numbers increase left to right (msb 1060 * simple: one bit per block, block numbers increase left to right (msb
@@ -1019,7 +1074,7 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
1019 } 1074 }
1020 exit: 1075 exit:
1021 kfree(buf); 1076 kfree(buf);
1022 return status; 1077 return 0;
1023} 1078}
1024 1079
1025static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs) 1080static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index cc1480a5e4c1..20657209a472 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -109,20 +109,6 @@ static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
109}; 109};
110 110
111/* 111/*
112 * fsl_elbc_oob_lp_eccm* specify that LP NAND's OOB free area starts at offset
113 * 1, so we have to adjust bad block pattern. This pattern should be used for
114 * x8 chips only. So far hardware does not support x16 chips anyway.
115 */
116static u8 scan_ff_pattern[] = { 0xff, };
117
118static struct nand_bbt_descr largepage_memorybased = {
119 .options = 0,
120 .offs = 0,
121 .len = 1,
122 .pattern = scan_ff_pattern,
123};
124
125/*
126 * ELBC may use HW ECC, so that OOB offsets, that NAND core uses for bbt, 112 * ELBC may use HW ECC, so that OOB offsets, that NAND core uses for bbt,
127 * interfere with ECC positions, that's why we implement our own descriptors. 113 * interfere with ECC positions, that's why we implement our own descriptors.
128 * OOB {11, 5}, works for both SP and LP chips, with ECCM = 1 and ECCM = 0. 114 * OOB {11, 5}, works for both SP and LP chips, with ECCM = 1 and ECCM = 0.
@@ -699,7 +685,6 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
699 chip->ecc.layout = (priv->fmr & FMR_ECCM) ? 685 chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
700 &fsl_elbc_oob_lp_eccm1 : 686 &fsl_elbc_oob_lp_eccm1 :
701 &fsl_elbc_oob_lp_eccm0; 687 &fsl_elbc_oob_lp_eccm0;
702 chip->badblock_pattern = &largepage_memorybased;
703 } 688 }
704 } else { 689 } else {
705 dev_err(priv->dev, 690 dev_err(priv->dev,
@@ -814,7 +799,7 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
814 799
815static DEFINE_MUTEX(fsl_elbc_nand_mutex); 800static DEFINE_MUTEX(fsl_elbc_nand_mutex);
816 801
817static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev) 802static int fsl_elbc_nand_probe(struct platform_device *pdev)
818{ 803{
819 struct fsl_lbc_regs __iomem *lbc; 804 struct fsl_lbc_regs __iomem *lbc;
820 struct fsl_elbc_mtd *priv; 805 struct fsl_elbc_mtd *priv;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 3551a99076ba..ad6222627fed 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -389,7 +389,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
389 timing = IFC_FIR_OP_RBCD; 389 timing = IFC_FIR_OP_RBCD;
390 390
391 out_be32(&ifc->ifc_nand.nand_fir0, 391 out_be32(&ifc->ifc_nand.nand_fir0,
392 (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) | 392 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
393 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 393 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
394 (timing << IFC_NAND_FIR0_OP2_SHIFT)); 394 (timing << IFC_NAND_FIR0_OP2_SHIFT));
395 out_be32(&ifc->ifc_nand.nand_fcr0, 395 out_be32(&ifc->ifc_nand.nand_fcr0,
@@ -754,7 +754,7 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
754 754
755 /* READID */ 755 /* READID */
756 out_be32(&ifc->ifc_nand.nand_fir0, 756 out_be32(&ifc->ifc_nand.nand_fir0,
757 (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) | 757 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
758 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 758 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
759 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT)); 759 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
760 out_be32(&ifc->ifc_nand.nand_fcr0, 760 out_be32(&ifc->ifc_nand.nand_fcr0,
@@ -922,7 +922,7 @@ static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
922 922
923static DEFINE_MUTEX(fsl_ifc_nand_mutex); 923static DEFINE_MUTEX(fsl_ifc_nand_mutex);
924 924
925static int __devinit fsl_ifc_nand_probe(struct platform_device *dev) 925static int fsl_ifc_nand_probe(struct platform_device *dev)
926{ 926{
927 struct fsl_ifc_regs __iomem *ifc; 927 struct fsl_ifc_regs __iomem *ifc;
928 struct fsl_ifc_mtd *priv; 928 struct fsl_ifc_mtd *priv;
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 45df542b9c61..5a8f5c4ce512 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -152,7 +152,7 @@ static void fun_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
152 fun_wait_rnb(fun); 152 fun_wait_rnb(fun);
153} 153}
154 154
155static int __devinit fun_chip_init(struct fsl_upm_nand *fun, 155static int fun_chip_init(struct fsl_upm_nand *fun,
156 const struct device_node *upm_np, 156 const struct device_node *upm_np,
157 const struct resource *io_res) 157 const struct resource *io_res)
158{ 158{
@@ -201,7 +201,7 @@ err:
201 return ret; 201 return ret;
202} 202}
203 203
204static int __devinit fun_probe(struct platform_device *ofdev) 204static int fun_probe(struct platform_device *ofdev)
205{ 205{
206 struct fsl_upm_nand *fun; 206 struct fsl_upm_nand *fun;
207 struct resource io_res; 207 struct resource io_res;
@@ -318,7 +318,7 @@ err1:
318 return ret; 318 return ret;
319} 319}
320 320
321static int __devexit fun_remove(struct platform_device *ofdev) 321static int fun_remove(struct platform_device *ofdev)
322{ 322{
323 struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev); 323 struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
324 int i; 324 int i;
@@ -350,7 +350,7 @@ static struct platform_driver of_fun_driver = {
350 .of_match_table = of_fun_match, 350 .of_match_table = of_fun_match,
351 }, 351 },
352 .probe = fun_probe, 352 .probe = fun_probe,
353 .remove = __devexit_p(fun_remove), 353 .remove = fun_remove,
354}; 354};
355 355
356module_platform_driver(of_fun_driver); 356module_platform_driver(of_fun_driver);
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 38d26240d8b1..1d7446434b0e 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -361,7 +361,7 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
361 struct nand_chip *this = mtd->priv; 361 struct nand_chip *this = mtd->priv;
362 struct fsmc_nand_data *host = container_of(mtd, 362 struct fsmc_nand_data *host = container_of(mtd,
363 struct fsmc_nand_data, mtd); 363 struct fsmc_nand_data, mtd);
364 void *__iomem *regs = host->regs_va; 364 void __iomem *regs = host->regs_va;
365 unsigned int bank = host->bank; 365 unsigned int bank = host->bank;
366 366
367 if (ctrl & NAND_CTRL_CHANGE) { 367 if (ctrl & NAND_CTRL_CHANGE) {
@@ -383,13 +383,13 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
383 pc |= FSMC_ENABLE; 383 pc |= FSMC_ENABLE;
384 else 384 else
385 pc &= ~FSMC_ENABLE; 385 pc &= ~FSMC_ENABLE;
386 writel(pc, FSMC_NAND_REG(regs, bank, PC)); 386 writel_relaxed(pc, FSMC_NAND_REG(regs, bank, PC));
387 } 387 }
388 388
389 mb(); 389 mb();
390 390
391 if (cmd != NAND_CMD_NONE) 391 if (cmd != NAND_CMD_NONE)
392 writeb(cmd, this->IO_ADDR_W); 392 writeb_relaxed(cmd, this->IO_ADDR_W);
393} 393}
394 394
395/* 395/*
@@ -426,14 +426,18 @@ static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
426 tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT; 426 tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
427 427
428 if (busw) 428 if (busw)
429 writel(value | FSMC_DEVWID_16, FSMC_NAND_REG(regs, bank, PC)); 429 writel_relaxed(value | FSMC_DEVWID_16,
430 FSMC_NAND_REG(regs, bank, PC));
430 else 431 else
431 writel(value | FSMC_DEVWID_8, FSMC_NAND_REG(regs, bank, PC)); 432 writel_relaxed(value | FSMC_DEVWID_8,
433 FSMC_NAND_REG(regs, bank, PC));
432 434
433 writel(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar, 435 writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
434 FSMC_NAND_REG(regs, bank, PC)); 436 FSMC_NAND_REG(regs, bank, PC));
435 writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, COMM)); 437 writel_relaxed(thiz | thold | twait | tset,
436 writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, ATTRIB)); 438 FSMC_NAND_REG(regs, bank, COMM));
439 writel_relaxed(thiz | thold | twait | tset,
440 FSMC_NAND_REG(regs, bank, ATTRIB));
437} 441}
438 442
439/* 443/*
@@ -446,11 +450,11 @@ static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
446 void __iomem *regs = host->regs_va; 450 void __iomem *regs = host->regs_va;
447 uint32_t bank = host->bank; 451 uint32_t bank = host->bank;
448 452
449 writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256, 453 writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
450 FSMC_NAND_REG(regs, bank, PC)); 454 FSMC_NAND_REG(regs, bank, PC));
451 writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN, 455 writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
452 FSMC_NAND_REG(regs, bank, PC)); 456 FSMC_NAND_REG(regs, bank, PC));
453 writel(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN, 457 writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
454 FSMC_NAND_REG(regs, bank, PC)); 458 FSMC_NAND_REG(regs, bank, PC));
455} 459}
456 460
@@ -470,7 +474,7 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
470 unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT; 474 unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
471 475
472 do { 476 do {
473 if (readl(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY) 477 if (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
474 break; 478 break;
475 else 479 else
476 cond_resched(); 480 cond_resched();
@@ -481,25 +485,25 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
481 return -ETIMEDOUT; 485 return -ETIMEDOUT;
482 } 486 }
483 487
484 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1)); 488 ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
485 ecc[0] = (uint8_t) (ecc_tmp >> 0); 489 ecc[0] = (uint8_t) (ecc_tmp >> 0);
486 ecc[1] = (uint8_t) (ecc_tmp >> 8); 490 ecc[1] = (uint8_t) (ecc_tmp >> 8);
487 ecc[2] = (uint8_t) (ecc_tmp >> 16); 491 ecc[2] = (uint8_t) (ecc_tmp >> 16);
488 ecc[3] = (uint8_t) (ecc_tmp >> 24); 492 ecc[3] = (uint8_t) (ecc_tmp >> 24);
489 493
490 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC2)); 494 ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
491 ecc[4] = (uint8_t) (ecc_tmp >> 0); 495 ecc[4] = (uint8_t) (ecc_tmp >> 0);
492 ecc[5] = (uint8_t) (ecc_tmp >> 8); 496 ecc[5] = (uint8_t) (ecc_tmp >> 8);
493 ecc[6] = (uint8_t) (ecc_tmp >> 16); 497 ecc[6] = (uint8_t) (ecc_tmp >> 16);
494 ecc[7] = (uint8_t) (ecc_tmp >> 24); 498 ecc[7] = (uint8_t) (ecc_tmp >> 24);
495 499
496 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC3)); 500 ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
497 ecc[8] = (uint8_t) (ecc_tmp >> 0); 501 ecc[8] = (uint8_t) (ecc_tmp >> 0);
498 ecc[9] = (uint8_t) (ecc_tmp >> 8); 502 ecc[9] = (uint8_t) (ecc_tmp >> 8);
499 ecc[10] = (uint8_t) (ecc_tmp >> 16); 503 ecc[10] = (uint8_t) (ecc_tmp >> 16);
500 ecc[11] = (uint8_t) (ecc_tmp >> 24); 504 ecc[11] = (uint8_t) (ecc_tmp >> 24);
501 505
502 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, STS)); 506 ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
503 ecc[12] = (uint8_t) (ecc_tmp >> 16); 507 ecc[12] = (uint8_t) (ecc_tmp >> 16);
504 508
505 return 0; 509 return 0;
@@ -519,7 +523,7 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
519 uint32_t bank = host->bank; 523 uint32_t bank = host->bank;
520 uint32_t ecc_tmp; 524 uint32_t ecc_tmp;
521 525
522 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1)); 526 ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
523 ecc[0] = (uint8_t) (ecc_tmp >> 0); 527 ecc[0] = (uint8_t) (ecc_tmp >> 0);
524 ecc[1] = (uint8_t) (ecc_tmp >> 8); 528 ecc[1] = (uint8_t) (ecc_tmp >> 8);
525 ecc[2] = (uint8_t) (ecc_tmp >> 16); 529 ecc[2] = (uint8_t) (ecc_tmp >> 16);
@@ -601,7 +605,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
601 dma_async_issue_pending(chan); 605 dma_async_issue_pending(chan);
602 606
603 ret = 607 ret =
604 wait_for_completion_interruptible_timeout(&host->dma_access_complete, 608 wait_for_completion_timeout(&host->dma_access_complete,
605 msecs_to_jiffies(3000)); 609 msecs_to_jiffies(3000));
606 if (ret <= 0) { 610 if (ret <= 0) {
607 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 611 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
@@ -628,10 +632,10 @@ static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
628 uint32_t *p = (uint32_t *)buf; 632 uint32_t *p = (uint32_t *)buf;
629 len = len >> 2; 633 len = len >> 2;
630 for (i = 0; i < len; i++) 634 for (i = 0; i < len; i++)
631 writel(p[i], chip->IO_ADDR_W); 635 writel_relaxed(p[i], chip->IO_ADDR_W);
632 } else { 636 } else {
633 for (i = 0; i < len; i++) 637 for (i = 0; i < len; i++)
634 writeb(buf[i], chip->IO_ADDR_W); 638 writeb_relaxed(buf[i], chip->IO_ADDR_W);
635 } 639 }
636} 640}
637 641
@@ -651,10 +655,10 @@ static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
651 uint32_t *p = (uint32_t *)buf; 655 uint32_t *p = (uint32_t *)buf;
652 len = len >> 2; 656 len = len >> 2;
653 for (i = 0; i < len; i++) 657 for (i = 0; i < len; i++)
654 p[i] = readl(chip->IO_ADDR_R); 658 p[i] = readl_relaxed(chip->IO_ADDR_R);
655 } else { 659 } else {
656 for (i = 0; i < len; i++) 660 for (i = 0; i < len; i++)
657 buf[i] = readb(chip->IO_ADDR_R); 661 buf[i] = readb_relaxed(chip->IO_ADDR_R);
658 } 662 }
659} 663}
660 664
@@ -783,7 +787,7 @@ static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
783 uint32_t num_err, i; 787 uint32_t num_err, i;
784 uint32_t ecc1, ecc2, ecc3, ecc4; 788 uint32_t ecc1, ecc2, ecc3, ecc4;
785 789
786 num_err = (readl(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF; 790 num_err = (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
787 791
788 /* no bit flipping */ 792 /* no bit flipping */
789 if (likely(num_err == 0)) 793 if (likely(num_err == 0))
@@ -826,10 +830,10 @@ static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
826 * uint64_t array and error offset indexes are populated in err_idx 830 * uint64_t array and error offset indexes are populated in err_idx
827 * array 831 * array
828 */ 832 */
829 ecc1 = readl(FSMC_NAND_REG(regs, bank, ECC1)); 833 ecc1 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
830 ecc2 = readl(FSMC_NAND_REG(regs, bank, ECC2)); 834 ecc2 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
831 ecc3 = readl(FSMC_NAND_REG(regs, bank, ECC3)); 835 ecc3 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
832 ecc4 = readl(FSMC_NAND_REG(regs, bank, STS)); 836 ecc4 = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
833 837
834 err_idx[0] = (ecc1 >> 0) & 0x1FFF; 838 err_idx[0] = (ecc1 >> 0) & 0x1FFF;
835 err_idx[1] = (ecc1 >> 13) & 0x1FFF; 839 err_idx[1] = (ecc1 >> 13) & 0x1FFF;
@@ -860,7 +864,7 @@ static bool filter(struct dma_chan *chan, void *slave)
860} 864}
861 865
862#ifdef CONFIG_OF 866#ifdef CONFIG_OF
863static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev, 867static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
864 struct device_node *np) 868 struct device_node *np)
865{ 869{
866 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); 870 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -876,15 +880,13 @@ static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
876 return -EINVAL; 880 return -EINVAL;
877 } 881 }
878 } 882 }
879 of_property_read_u32(np, "st,ale-off", &pdata->ale_off);
880 of_property_read_u32(np, "st,cle-off", &pdata->cle_off);
881 if (of_get_property(np, "nand-skip-bbtscan", NULL)) 883 if (of_get_property(np, "nand-skip-bbtscan", NULL))
882 pdata->options = NAND_SKIP_BBTSCAN; 884 pdata->options = NAND_SKIP_BBTSCAN;
883 885
884 return 0; 886 return 0;
885} 887}
886#else 888#else
887static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev, 889static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
888 struct device_node *np) 890 struct device_node *np)
889{ 891{
890 return -ENOSYS; 892 return -ENOSYS;
@@ -935,41 +937,28 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
935 if (!res) 937 if (!res)
936 return -EINVAL; 938 return -EINVAL;
937 939
938 if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), 940 host->data_va = devm_request_and_ioremap(&pdev->dev, res);
939 pdev->name)) {
940 dev_err(&pdev->dev, "Failed to get memory data resourse\n");
941 return -ENOENT;
942 }
943
944 host->data_pa = (dma_addr_t)res->start;
945 host->data_va = devm_ioremap(&pdev->dev, res->start,
946 resource_size(res));
947 if (!host->data_va) { 941 if (!host->data_va) {
948 dev_err(&pdev->dev, "data ioremap failed\n"); 942 dev_err(&pdev->dev, "data ioremap failed\n");
949 return -ENOMEM; 943 return -ENOMEM;
950 } 944 }
945 host->data_pa = (dma_addr_t)res->start;
951 946
952 if (!devm_request_mem_region(&pdev->dev, res->start + pdata->ale_off, 947 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
953 resource_size(res), pdev->name)) { 948 if (!res)
954 dev_err(&pdev->dev, "Failed to get memory ale resourse\n"); 949 return -EINVAL;
955 return -ENOENT;
956 }
957 950
958 host->addr_va = devm_ioremap(&pdev->dev, res->start + pdata->ale_off, 951 host->addr_va = devm_request_and_ioremap(&pdev->dev, res);
959 resource_size(res));
960 if (!host->addr_va) { 952 if (!host->addr_va) {
961 dev_err(&pdev->dev, "ale ioremap failed\n"); 953 dev_err(&pdev->dev, "ale ioremap failed\n");
962 return -ENOMEM; 954 return -ENOMEM;
963 } 955 }
964 956
965 if (!devm_request_mem_region(&pdev->dev, res->start + pdata->cle_off, 957 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
966 resource_size(res), pdev->name)) { 958 if (!res)
967 dev_err(&pdev->dev, "Failed to get memory cle resourse\n"); 959 return -EINVAL;
968 return -ENOENT;
969 }
970 960
971 host->cmd_va = devm_ioremap(&pdev->dev, res->start + pdata->cle_off, 961 host->cmd_va = devm_request_and_ioremap(&pdev->dev, res);
972 resource_size(res));
973 if (!host->cmd_va) { 962 if (!host->cmd_va) {
974 dev_err(&pdev->dev, "ale ioremap failed\n"); 963 dev_err(&pdev->dev, "ale ioremap failed\n");
975 return -ENOMEM; 964 return -ENOMEM;
@@ -979,14 +968,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
979 if (!res) 968 if (!res)
980 return -EINVAL; 969 return -EINVAL;
981 970
982 if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), 971 host->regs_va = devm_request_and_ioremap(&pdev->dev, res);
983 pdev->name)) {
984 dev_err(&pdev->dev, "Failed to get memory regs resourse\n");
985 return -ENOENT;
986 }
987
988 host->regs_va = devm_ioremap(&pdev->dev, res->start,
989 resource_size(res));
990 if (!host->regs_va) { 972 if (!host->regs_va) {
991 dev_err(&pdev->dev, "regs ioremap failed\n"); 973 dev_err(&pdev->dev, "regs ioremap failed\n");
992 return -ENOMEM; 974 return -ENOMEM;
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index bc73bc5f2713..e789e3f51710 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -90,14 +90,14 @@ static void gpio_nand_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
90{ 90{
91 struct nand_chip *this = mtd->priv; 91 struct nand_chip *this = mtd->priv;
92 92
93 writesb(this->IO_ADDR_W, buf, len); 93 iowrite8_rep(this->IO_ADDR_W, buf, len);
94} 94}
95 95
96static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len) 96static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
97{ 97{
98 struct nand_chip *this = mtd->priv; 98 struct nand_chip *this = mtd->priv;
99 99
100 readsb(this->IO_ADDR_R, buf, len); 100 ioread8_rep(this->IO_ADDR_R, buf, len);
101} 101}
102 102
103static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf, 103static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
@@ -106,7 +106,7 @@ static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
106 struct nand_chip *this = mtd->priv; 106 struct nand_chip *this = mtd->priv;
107 107
108 if (IS_ALIGNED((unsigned long)buf, 2)) { 108 if (IS_ALIGNED((unsigned long)buf, 2)) {
109 writesw(this->IO_ADDR_W, buf, len>>1); 109 iowrite16_rep(this->IO_ADDR_W, buf, len>>1);
110 } else { 110 } else {
111 int i; 111 int i;
112 unsigned short *ptr = (unsigned short *)buf; 112 unsigned short *ptr = (unsigned short *)buf;
@@ -121,7 +121,7 @@ static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
121 struct nand_chip *this = mtd->priv; 121 struct nand_chip *this = mtd->priv;
122 122
123 if (IS_ALIGNED((unsigned long)buf, 2)) { 123 if (IS_ALIGNED((unsigned long)buf, 2)) {
124 readsw(this->IO_ADDR_R, buf, len>>1); 124 ioread16_rep(this->IO_ADDR_R, buf, len>>1);
125 } else { 125 } else {
126 int i; 126 int i;
127 unsigned short *ptr = (unsigned short *)buf; 127 unsigned short *ptr = (unsigned short *)buf;
@@ -134,7 +134,11 @@ static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
134static int gpio_nand_devready(struct mtd_info *mtd) 134static int gpio_nand_devready(struct mtd_info *mtd)
135{ 135{
136 struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd); 136 struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
137 return gpio_get_value(gpiomtd->plat.gpio_rdy); 137
138 if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
139 return gpio_get_value(gpiomtd->plat.gpio_rdy);
140
141 return 1;
138} 142}
139 143
140#ifdef CONFIG_OF 144#ifdef CONFIG_OF
@@ -227,7 +231,7 @@ gpio_nand_get_io_sync(struct platform_device *pdev)
227 return platform_get_resource(pdev, IORESOURCE_MEM, 1); 231 return platform_get_resource(pdev, IORESOURCE_MEM, 1);
228} 232}
229 233
230static int __devexit gpio_nand_remove(struct platform_device *dev) 234static int gpio_nand_remove(struct platform_device *dev)
231{ 235{
232 struct gpiomtd *gpiomtd = platform_get_drvdata(dev); 236 struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
233 struct resource *res; 237 struct resource *res;
@@ -252,7 +256,8 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
252 gpio_free(gpiomtd->plat.gpio_nce); 256 gpio_free(gpiomtd->plat.gpio_nce);
253 if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 257 if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
254 gpio_free(gpiomtd->plat.gpio_nwp); 258 gpio_free(gpiomtd->plat.gpio_nwp);
255 gpio_free(gpiomtd->plat.gpio_rdy); 259 if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
260 gpio_free(gpiomtd->plat.gpio_rdy);
256 261
257 kfree(gpiomtd); 262 kfree(gpiomtd);
258 263
@@ -277,7 +282,7 @@ static void __iomem *request_and_remap(struct resource *res, size_t size,
277 return ptr; 282 return ptr;
278} 283}
279 284
280static int __devinit gpio_nand_probe(struct platform_device *dev) 285static int gpio_nand_probe(struct platform_device *dev)
281{ 286{
282 struct gpiomtd *gpiomtd; 287 struct gpiomtd *gpiomtd;
283 struct nand_chip *this; 288 struct nand_chip *this;
@@ -336,10 +341,12 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
336 if (ret) 341 if (ret)
337 goto err_cle; 342 goto err_cle;
338 gpio_direction_output(gpiomtd->plat.gpio_cle, 0); 343 gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
339 ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY"); 344 if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
340 if (ret) 345 ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
341 goto err_rdy; 346 if (ret)
342 gpio_direction_input(gpiomtd->plat.gpio_rdy); 347 goto err_rdy;
348 gpio_direction_input(gpiomtd->plat.gpio_rdy);
349 }
343 350
344 351
345 this->IO_ADDR_W = this->IO_ADDR_R; 352 this->IO_ADDR_W = this->IO_ADDR_R;
@@ -386,7 +393,8 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
386err_wp: 393err_wp:
387 if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 394 if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
388 gpio_set_value(gpiomtd->plat.gpio_nwp, 0); 395 gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
389 gpio_free(gpiomtd->plat.gpio_rdy); 396 if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
397 gpio_free(gpiomtd->plat.gpio_rdy);
390err_rdy: 398err_rdy:
391 gpio_free(gpiomtd->plat.gpio_cle); 399 gpio_free(gpiomtd->plat.gpio_cle);
392err_cle: 400err_cle:
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 3502accd4bc3..d84699c7968e 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -18,7 +18,6 @@
18 * with this program; if not, write to the Free Software Foundation, Inc., 18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */ 20 */
21#include <linux/mtd/gpmi-nand.h>
22#include <linux/delay.h> 21#include <linux/delay.h>
23#include <linux/clk.h> 22#include <linux/clk.h>
24 23
@@ -166,6 +165,15 @@ int gpmi_init(struct gpmi_nand_data *this)
166 if (ret) 165 if (ret)
167 goto err_out; 166 goto err_out;
168 167
168 /*
169 * Reset BCH here, too. We got failures otherwise :(
170 * See later BCH reset for explanation of MX23 handling
171 */
172 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
173 if (ret)
174 goto err_out;
175
176
169 /* Choose NAND mode. */ 177 /* Choose NAND mode. */
170 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR); 178 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
171 179
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index d79696b2f19b..5cd141f7bfc2 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -25,7 +25,6 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/mtd/gpmi-nand.h>
29#include <linux/mtd/partitions.h> 28#include <linux/mtd/partitions.h>
30#include <linux/pinctrl/consumer.h> 29#include <linux/pinctrl/consumer.h>
31#include <linux/of.h> 30#include <linux/of.h>
@@ -33,6 +32,12 @@
33#include <linux/of_mtd.h> 32#include <linux/of_mtd.h>
34#include "gpmi-nand.h" 33#include "gpmi-nand.h"
35 34
35/* Resource names for the GPMI NAND driver. */
36#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
37#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
38#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
39#define GPMI_NAND_DMA_INTERRUPT_RES_NAME "gpmi-dma"
40
36/* add our owner bbt descriptor */ 41/* add our owner bbt descriptor */
37static uint8_t scan_ff_pattern[] = { 0xff }; 42static uint8_t scan_ff_pattern[] = { 0xff };
38static struct nand_bbt_descr gpmi_bbt_descr = { 43static struct nand_bbt_descr gpmi_bbt_descr = {
@@ -222,7 +227,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
222 227
223 ret = dma_map_sg(this->dev, sgl, 1, dr); 228 ret = dma_map_sg(this->dev, sgl, 1, dr);
224 if (ret == 0) 229 if (ret == 0)
225 pr_err("map failed.\n"); 230 pr_err("DMA mapping failed.\n");
226 231
227 this->direct_dma_map_ok = false; 232 this->direct_dma_map_ok = false;
228 } 233 }
@@ -314,7 +319,7 @@ int start_dma_with_bch_irq(struct gpmi_nand_data *this,
314 return 0; 319 return 0;
315} 320}
316 321
317static int __devinit 322static int
318acquire_register_block(struct gpmi_nand_data *this, const char *res_name) 323acquire_register_block(struct gpmi_nand_data *this, const char *res_name)
319{ 324{
320 struct platform_device *pdev = this->pdev; 325 struct platform_device *pdev = this->pdev;
@@ -355,7 +360,7 @@ static void release_register_block(struct gpmi_nand_data *this)
355 res->bch_regs = NULL; 360 res->bch_regs = NULL;
356} 361}
357 362
358static int __devinit 363static int
359acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h) 364acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
360{ 365{
361 struct platform_device *pdev = this->pdev; 366 struct platform_device *pdev = this->pdev;
@@ -422,7 +427,7 @@ static void release_dma_channels(struct gpmi_nand_data *this)
422 } 427 }
423} 428}
424 429
425static int __devinit acquire_dma_channels(struct gpmi_nand_data *this) 430static int acquire_dma_channels(struct gpmi_nand_data *this)
426{ 431{
427 struct platform_device *pdev = this->pdev; 432 struct platform_device *pdev = this->pdev;
428 struct resource *r_dma; 433 struct resource *r_dma;
@@ -456,7 +461,7 @@ static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
456 461
457 dma_chan = dma_request_channel(mask, gpmi_dma_filter, this); 462 dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
458 if (!dma_chan) { 463 if (!dma_chan) {
459 pr_err("dma_request_channel failed.\n"); 464 pr_err("Failed to request DMA channel.\n");
460 goto acquire_err; 465 goto acquire_err;
461 } 466 }
462 467
@@ -487,7 +492,7 @@ static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
487 "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch", 492 "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
488}; 493};
489 494
490static int __devinit gpmi_get_clks(struct gpmi_nand_data *this) 495static int gpmi_get_clks(struct gpmi_nand_data *this)
491{ 496{
492 struct resources *r = &this->resources; 497 struct resources *r = &this->resources;
493 char **extra_clks = NULL; 498 char **extra_clks = NULL;
@@ -533,7 +538,7 @@ err_clock:
533 return -ENOMEM; 538 return -ENOMEM;
534} 539}
535 540
536static int __devinit acquire_resources(struct gpmi_nand_data *this) 541static int acquire_resources(struct gpmi_nand_data *this)
537{ 542{
538 struct pinctrl *pinctrl; 543 struct pinctrl *pinctrl;
539 int ret; 544 int ret;
@@ -583,7 +588,7 @@ static void release_resources(struct gpmi_nand_data *this)
583 release_dma_channels(this); 588 release_dma_channels(this);
584} 589}
585 590
586static int __devinit init_hardware(struct gpmi_nand_data *this) 591static int init_hardware(struct gpmi_nand_data *this)
587{ 592{
588 int ret; 593 int ret;
589 594
@@ -625,7 +630,8 @@ static int read_page_prepare(struct gpmi_nand_data *this,
625 length, DMA_FROM_DEVICE); 630 length, DMA_FROM_DEVICE);
626 if (dma_mapping_error(dev, dest_phys)) { 631 if (dma_mapping_error(dev, dest_phys)) {
627 if (alt_size < length) { 632 if (alt_size < length) {
628 pr_err("Alternate buffer is too small\n"); 633 pr_err("%s, Alternate buffer is too small\n",
634 __func__);
629 return -ENOMEM; 635 return -ENOMEM;
630 } 636 }
631 goto map_failed; 637 goto map_failed;
@@ -675,7 +681,8 @@ static int send_page_prepare(struct gpmi_nand_data *this,
675 DMA_TO_DEVICE); 681 DMA_TO_DEVICE);
676 if (dma_mapping_error(dev, source_phys)) { 682 if (dma_mapping_error(dev, source_phys)) {
677 if (alt_size < length) { 683 if (alt_size < length) {
678 pr_err("Alternate buffer is too small\n"); 684 pr_err("%s, Alternate buffer is too small\n",
685 __func__);
679 return -ENOMEM; 686 return -ENOMEM;
680 } 687 }
681 goto map_failed; 688 goto map_failed;
@@ -763,7 +770,7 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
763 770
764error_alloc: 771error_alloc:
765 gpmi_free_dma_buffer(this); 772 gpmi_free_dma_buffer(this);
766 pr_err("allocate DMA buffer ret!!\n"); 773 pr_err("Error allocating DMA buffers!\n");
767 return -ENOMEM; 774 return -ENOMEM;
768} 775}
769 776
@@ -1474,7 +1481,7 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)
1474 /* Set up the NFC geometry which is used by BCH. */ 1481 /* Set up the NFC geometry which is used by BCH. */
1475 ret = bch_set_geometry(this); 1482 ret = bch_set_geometry(this);
1476 if (ret) { 1483 if (ret) {
1477 pr_err("set geometry ret : %d\n", ret); 1484 pr_err("Error setting BCH geometry : %d\n", ret);
1478 return ret; 1485 return ret;
1479 } 1486 }
1480 1487
@@ -1535,7 +1542,7 @@ static void gpmi_nfc_exit(struct gpmi_nand_data *this)
1535 gpmi_free_dma_buffer(this); 1542 gpmi_free_dma_buffer(this);
1536} 1543}
1537 1544
1538static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this) 1545static int gpmi_nfc_init(struct gpmi_nand_data *this)
1539{ 1546{
1540 struct mtd_info *mtd = &this->mtd; 1547 struct mtd_info *mtd = &this->mtd;
1541 struct nand_chip *chip = &this->nand; 1548 struct nand_chip *chip = &this->nand;
@@ -1618,7 +1625,7 @@ static const struct of_device_id gpmi_nand_id_table[] = {
1618}; 1625};
1619MODULE_DEVICE_TABLE(of, gpmi_nand_id_table); 1626MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
1620 1627
1621static int __devinit gpmi_nand_probe(struct platform_device *pdev) 1628static int gpmi_nand_probe(struct platform_device *pdev)
1622{ 1629{
1623 struct gpmi_nand_data *this; 1630 struct gpmi_nand_data *this;
1624 const struct of_device_id *of_id; 1631 const struct of_device_id *of_id;
@@ -1668,7 +1675,7 @@ exit_acquire_resources:
1668 return ret; 1675 return ret;
1669} 1676}
1670 1677
1671static int __devexit gpmi_nand_remove(struct platform_device *pdev) 1678static int gpmi_nand_remove(struct platform_device *pdev)
1672{ 1679{
1673 struct gpmi_nand_data *this = platform_get_drvdata(pdev); 1680 struct gpmi_nand_data *this = platform_get_drvdata(pdev);
1674 1681
@@ -1685,7 +1692,7 @@ static struct platform_driver gpmi_nand_driver = {
1685 .of_match_table = gpmi_nand_id_table, 1692 .of_match_table = gpmi_nand_id_table,
1686 }, 1693 },
1687 .probe = gpmi_nand_probe, 1694 .probe = gpmi_nand_probe,
1688 .remove = __devexit_p(gpmi_nand_remove), 1695 .remove = gpmi_nand_remove,
1689 .id_table = gpmi_ids, 1696 .id_table = gpmi_ids,
1690}; 1697};
1691module_platform_driver(gpmi_nand_driver); 1698module_platform_driver(gpmi_nand_driver);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index 7ac25c1e58f9..3d93a5e39090 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -130,7 +130,6 @@ struct gpmi_nand_data {
130 /* System Interface */ 130 /* System Interface */
131 struct device *dev; 131 struct device *dev;
132 struct platform_device *pdev; 132 struct platform_device *pdev;
133 struct gpmi_nand_platform_data *pdata;
134 133
135 /* Resources */ 134 /* Resources */
136 struct resources resources; 135 struct resources resources;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 100b6775e175..8d415f014e1d 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -316,13 +316,17 @@ err:
316 return ret; 316 return ret;
317} 317}
318 318
319static inline void jz_nand_iounmap_resource(struct resource *res, void __iomem *base) 319static inline void jz_nand_iounmap_resource(struct resource *res,
320 void __iomem *base)
320{ 321{
321 iounmap(base); 322 iounmap(base);
322 release_mem_region(res->start, resource_size(res)); 323 release_mem_region(res->start, resource_size(res));
323} 324}
324 325
325static int __devinit jz_nand_detect_bank(struct platform_device *pdev, struct jz_nand *nand, unsigned char bank, size_t chipnr, uint8_t *nand_maf_id, uint8_t *nand_dev_id) { 326static int jz_nand_detect_bank(struct platform_device *pdev,
327 struct jz_nand *nand, unsigned char bank,
328 size_t chipnr, uint8_t *nand_maf_id,
329 uint8_t *nand_dev_id) {
326 int ret; 330 int ret;
327 int gpio; 331 int gpio;
328 char gpio_name[9]; 332 char gpio_name[9];
@@ -400,7 +404,7 @@ notfound_gpio:
400 return ret; 404 return ret;
401} 405}
402 406
403static int __devinit jz_nand_probe(struct platform_device *pdev) 407static int jz_nand_probe(struct platform_device *pdev)
404{ 408{
405 int ret; 409 int ret;
406 struct jz_nand *nand; 410 struct jz_nand *nand;
@@ -541,7 +545,7 @@ err_free:
541 return ret; 545 return ret;
542} 546}
543 547
544static int __devexit jz_nand_remove(struct platform_device *pdev) 548static int jz_nand_remove(struct platform_device *pdev)
545{ 549{
546 struct jz_nand *nand = platform_get_drvdata(pdev); 550 struct jz_nand *nand = platform_get_drvdata(pdev);
547 struct jz_nand_platform_data *pdata = pdev->dev.platform_data; 551 struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
@@ -573,7 +577,7 @@ static int __devexit jz_nand_remove(struct platform_device *pdev)
573 577
574static struct platform_driver jz_nand_driver = { 578static struct platform_driver jz_nand_driver = {
575 .probe = jz_nand_probe, 579 .probe = jz_nand_probe,
576 .remove = __devexit_p(jz_nand_remove), 580 .remove = jz_nand_remove,
577 .driver = { 581 .driver = {
578 .name = "jz4740-nand", 582 .name = "jz4740-nand",
579 .owner = THIS_MODULE, 583 .owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index c29b7ac1f6af..f182befa7360 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -655,7 +655,7 @@ static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
655/* 655/*
656 * Probe for NAND controller 656 * Probe for NAND controller
657 */ 657 */
658static int __devinit lpc32xx_nand_probe(struct platform_device *pdev) 658static int lpc32xx_nand_probe(struct platform_device *pdev)
659{ 659{
660 struct lpc32xx_nand_host *host; 660 struct lpc32xx_nand_host *host;
661 struct mtd_info *mtd; 661 struct mtd_info *mtd;
@@ -845,7 +845,7 @@ err_exit1:
845/* 845/*
846 * Remove NAND device 846 * Remove NAND device
847 */ 847 */
848static int __devexit lpc32xx_nand_remove(struct platform_device *pdev) 848static int lpc32xx_nand_remove(struct platform_device *pdev)
849{ 849{
850 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); 850 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
851 struct mtd_info *mtd = &host->mtd; 851 struct mtd_info *mtd = &host->mtd;
@@ -907,7 +907,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
907 907
908static struct platform_driver lpc32xx_nand_driver = { 908static struct platform_driver lpc32xx_nand_driver = {
909 .probe = lpc32xx_nand_probe, 909 .probe = lpc32xx_nand_probe,
910 .remove = __devexit_p(lpc32xx_nand_remove), 910 .remove = lpc32xx_nand_remove,
911 .resume = lpc32xx_nand_resume, 911 .resume = lpc32xx_nand_resume,
912 .suspend = lpc32xx_nand_suspend, 912 .suspend = lpc32xx_nand_suspend,
913 .driver = { 913 .driver = {
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 32409c45d479..030b78c62895 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -755,7 +755,7 @@ static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
755/* 755/*
756 * Probe for NAND controller 756 * Probe for NAND controller
757 */ 757 */
758static int __devinit lpc32xx_nand_probe(struct platform_device *pdev) 758static int lpc32xx_nand_probe(struct platform_device *pdev)
759{ 759{
760 struct lpc32xx_nand_host *host; 760 struct lpc32xx_nand_host *host;
761 struct mtd_info *mtd; 761 struct mtd_info *mtd;
@@ -949,7 +949,7 @@ err_exit1:
949/* 949/*
950 * Remove NAND device. 950 * Remove NAND device.
951 */ 951 */
952static int __devexit lpc32xx_nand_remove(struct platform_device *pdev) 952static int lpc32xx_nand_remove(struct platform_device *pdev)
953{ 953{
954 uint32_t tmp; 954 uint32_t tmp;
955 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); 955 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
@@ -1021,7 +1021,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
1021 1021
1022static struct platform_driver lpc32xx_nand_driver = { 1022static struct platform_driver lpc32xx_nand_driver = {
1023 .probe = lpc32xx_nand_probe, 1023 .probe = lpc32xx_nand_probe,
1024 .remove = __devexit_p(lpc32xx_nand_remove), 1024 .remove = lpc32xx_nand_remove,
1025 .resume = lpc32xx_nand_resume, 1025 .resume = lpc32xx_nand_resume,
1026 .suspend = lpc32xx_nand_suspend, 1026 .suspend = lpc32xx_nand_suspend,
1027 .driver = { 1027 .driver = {
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index f776c8577b8c..3c9cdcbc4cba 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -626,7 +626,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
626 iounmap(prv->csreg); 626 iounmap(prv->csreg);
627} 627}
628 628
629static int __devinit mpc5121_nfc_probe(struct platform_device *op) 629static int mpc5121_nfc_probe(struct platform_device *op)
630{ 630{
631 struct device_node *rootnode, *dn = op->dev.of_node; 631 struct device_node *rootnode, *dn = op->dev.of_node;
632 struct device *dev = &op->dev; 632 struct device *dev = &op->dev;
@@ -827,7 +827,7 @@ error:
827 return retval; 827 return retval;
828} 828}
829 829
830static int __devexit mpc5121_nfc_remove(struct platform_device *op) 830static int mpc5121_nfc_remove(struct platform_device *op)
831{ 831{
832 struct device *dev = &op->dev; 832 struct device *dev = &op->dev;
833 struct mtd_info *mtd = dev_get_drvdata(dev); 833 struct mtd_info *mtd = dev_get_drvdata(dev);
@@ -841,14 +841,14 @@ static int __devexit mpc5121_nfc_remove(struct platform_device *op)
841 return 0; 841 return 0;
842} 842}
843 843
844static struct of_device_id mpc5121_nfc_match[] __devinitdata = { 844static struct of_device_id mpc5121_nfc_match[] = {
845 { .compatible = "fsl,mpc5121-nfc", }, 845 { .compatible = "fsl,mpc5121-nfc", },
846 {}, 846 {},
847}; 847};
848 848
849static struct platform_driver mpc5121_nfc_driver = { 849static struct platform_driver mpc5121_nfc_driver = {
850 .probe = mpc5121_nfc_probe, 850 .probe = mpc5121_nfc_probe,
851 .remove = __devexit_p(mpc5121_nfc_remove), 851 .remove = mpc5121_nfc_remove,
852 .driver = { 852 .driver = {
853 .name = DRV_NAME, 853 .name = DRV_NAME,
854 .owner = THIS_MODULE, 854 .owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 022dcdc256fb..45204e41a028 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -266,7 +266,8 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
266 } 266 }
267}; 267};
268 268
269static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL }; 269static const char const *part_probes[] = {
270 "cmdlinepart", "RedBoot", "ofpart", NULL };
270 271
271static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size) 272static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
272{ 273{
@@ -1378,7 +1379,7 @@ static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
1378} 1379}
1379#endif 1380#endif
1380 1381
1381static int __devinit mxcnd_probe(struct platform_device *pdev) 1382static int mxcnd_probe(struct platform_device *pdev)
1382{ 1383{
1383 struct nand_chip *this; 1384 struct nand_chip *this;
1384 struct mtd_info *mtd; 1385 struct mtd_info *mtd;
@@ -1556,12 +1557,13 @@ static int __devinit mxcnd_probe(struct platform_device *pdev)
1556 return 0; 1557 return 0;
1557 1558
1558escan: 1559escan:
1559 clk_disable_unprepare(host->clk); 1560 if (host->clk_act)
1561 clk_disable_unprepare(host->clk);
1560 1562
1561 return err; 1563 return err;
1562} 1564}
1563 1565
1564static int __devexit mxcnd_remove(struct platform_device *pdev) 1566static int mxcnd_remove(struct platform_device *pdev)
1565{ 1567{
1566 struct mxc_nand_host *host = platform_get_drvdata(pdev); 1568 struct mxc_nand_host *host = platform_get_drvdata(pdev);
1567 1569
@@ -1580,7 +1582,7 @@ static struct platform_driver mxcnd_driver = {
1580 }, 1582 },
1581 .id_table = mxcnd_devtype, 1583 .id_table = mxcnd_devtype,
1582 .probe = mxcnd_probe, 1584 .probe = mxcnd_probe,
1583 .remove = __devexit_p(mxcnd_remove), 1585 .remove = mxcnd_remove,
1584}; 1586};
1585module_platform_driver(mxcnd_driver); 1587module_platform_driver(mxcnd_driver);
1586 1588
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 1a03b7f673ce..8323ac991ad1 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -93,8 +93,7 @@ static struct nand_ecclayout nand_oob_128 = {
93 .length = 78} } 93 .length = 78} }
94}; 94};
95 95
96static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, 96static int nand_get_device(struct mtd_info *mtd, int new_state);
97 int new_state);
98 97
99static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, 98static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
100 struct mtd_oob_ops *ops); 99 struct mtd_oob_ops *ops);
@@ -130,15 +129,12 @@ static int check_offs_len(struct mtd_info *mtd,
130 * nand_release_device - [GENERIC] release chip 129 * nand_release_device - [GENERIC] release chip
131 * @mtd: MTD device structure 130 * @mtd: MTD device structure
132 * 131 *
133 * Deselect, release chip lock and wake up anyone waiting on the device. 132 * Release chip lock and wake up anyone waiting on the device.
134 */ 133 */
135static void nand_release_device(struct mtd_info *mtd) 134static void nand_release_device(struct mtd_info *mtd)
136{ 135{
137 struct nand_chip *chip = mtd->priv; 136 struct nand_chip *chip = mtd->priv;
138 137
139 /* De-select the NAND device */
140 chip->select_chip(mtd, -1);
141
142 /* Release the controller and the chip */ 138 /* Release the controller and the chip */
143 spin_lock(&chip->controller->lock); 139 spin_lock(&chip->controller->lock);
144 chip->controller->active = NULL; 140 chip->controller->active = NULL;
@@ -160,7 +156,7 @@ static uint8_t nand_read_byte(struct mtd_info *mtd)
160} 156}
161 157
162/** 158/**
163 * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip 159 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
164 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip 160 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
165 * @mtd: MTD device structure 161 * @mtd: MTD device structure
166 * 162 *
@@ -303,7 +299,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
303 if (getchip) { 299 if (getchip) {
304 chipnr = (int)(ofs >> chip->chip_shift); 300 chipnr = (int)(ofs >> chip->chip_shift);
305 301
306 nand_get_device(chip, mtd, FL_READING); 302 nand_get_device(mtd, FL_READING);
307 303
308 /* Select the NAND device */ 304 /* Select the NAND device */
309 chip->select_chip(mtd, chipnr); 305 chip->select_chip(mtd, chipnr);
@@ -333,8 +329,10 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
333 i++; 329 i++;
334 } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE)); 330 } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
335 331
336 if (getchip) 332 if (getchip) {
333 chip->select_chip(mtd, -1);
337 nand_release_device(mtd); 334 nand_release_device(mtd);
335 }
338 336
339 return res; 337 return res;
340} 338}
@@ -383,7 +381,7 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
383 struct mtd_oob_ops ops; 381 struct mtd_oob_ops ops;
384 loff_t wr_ofs = ofs; 382 loff_t wr_ofs = ofs;
385 383
386 nand_get_device(chip, mtd, FL_WRITING); 384 nand_get_device(mtd, FL_WRITING);
387 385
388 ops.datbuf = NULL; 386 ops.datbuf = NULL;
389 ops.oobbuf = buf; 387 ops.oobbuf = buf;
@@ -492,7 +490,7 @@ static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
492void nand_wait_ready(struct mtd_info *mtd) 490void nand_wait_ready(struct mtd_info *mtd)
493{ 491{
494 struct nand_chip *chip = mtd->priv; 492 struct nand_chip *chip = mtd->priv;
495 unsigned long timeo = jiffies + 2; 493 unsigned long timeo = jiffies + msecs_to_jiffies(20);
496 494
497 /* 400ms timeout */ 495 /* 400ms timeout */
498 if (in_interrupt() || oops_in_progress) 496 if (in_interrupt() || oops_in_progress)
@@ -750,15 +748,15 @@ static void panic_nand_get_device(struct nand_chip *chip,
750 748
751/** 749/**
752 * nand_get_device - [GENERIC] Get chip for selected access 750 * nand_get_device - [GENERIC] Get chip for selected access
753 * @chip: the nand chip descriptor
754 * @mtd: MTD device structure 751 * @mtd: MTD device structure
755 * @new_state: the state which is requested 752 * @new_state: the state which is requested
756 * 753 *
757 * Get the device and lock it for exclusive access 754 * Get the device and lock it for exclusive access
758 */ 755 */
759static int 756static int
760nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state) 757nand_get_device(struct mtd_info *mtd, int new_state)
761{ 758{
759 struct nand_chip *chip = mtd->priv;
762 spinlock_t *lock = &chip->controller->lock; 760 spinlock_t *lock = &chip->controller->lock;
763 wait_queue_head_t *wq = &chip->controller->wq; 761 wait_queue_head_t *wq = &chip->controller->wq;
764 DECLARE_WAITQUEUE(wait, current); 762 DECLARE_WAITQUEUE(wait, current);
@@ -865,6 +863,8 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
865 led_trigger_event(nand_led_trigger, LED_OFF); 863 led_trigger_event(nand_led_trigger, LED_OFF);
866 864
867 status = (int)chip->read_byte(mtd); 865 status = (int)chip->read_byte(mtd);
866 /* This can happen if in case of timeout or buggy dev_ready */
867 WARN_ON(!(status & NAND_STATUS_READY));
868 return status; 868 return status;
869} 869}
870 870
@@ -899,7 +899,7 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
899 /* Call wait ready function */ 899 /* Call wait ready function */
900 status = chip->waitfunc(mtd, chip); 900 status = chip->waitfunc(mtd, chip);
901 /* See if device thinks it succeeded */ 901 /* See if device thinks it succeeded */
902 if (status & 0x01) { 902 if (status & NAND_STATUS_FAIL) {
903 pr_debug("%s: error status = 0x%08x\n", 903 pr_debug("%s: error status = 0x%08x\n",
904 __func__, status); 904 __func__, status);
905 ret = -EIO; 905 ret = -EIO;
@@ -932,7 +932,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
932 if (ofs + len == mtd->size) 932 if (ofs + len == mtd->size)
933 len -= mtd->erasesize; 933 len -= mtd->erasesize;
934 934
935 nand_get_device(chip, mtd, FL_UNLOCKING); 935 nand_get_device(mtd, FL_UNLOCKING);
936 936
937 /* Shift to get chip number */ 937 /* Shift to get chip number */
938 chipnr = ofs >> chip->chip_shift; 938 chipnr = ofs >> chip->chip_shift;
@@ -950,6 +950,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
950 ret = __nand_unlock(mtd, ofs, len, 0); 950 ret = __nand_unlock(mtd, ofs, len, 0);
951 951
952out: 952out:
953 chip->select_chip(mtd, -1);
953 nand_release_device(mtd); 954 nand_release_device(mtd);
954 955
955 return ret; 956 return ret;
@@ -981,7 +982,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
981 if (check_offs_len(mtd, ofs, len)) 982 if (check_offs_len(mtd, ofs, len))
982 ret = -EINVAL; 983 ret = -EINVAL;
983 984
984 nand_get_device(chip, mtd, FL_LOCKING); 985 nand_get_device(mtd, FL_LOCKING);
985 986
986 /* Shift to get chip number */ 987 /* Shift to get chip number */
987 chipnr = ofs >> chip->chip_shift; 988 chipnr = ofs >> chip->chip_shift;
@@ -1004,7 +1005,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1004 /* Call wait ready function */ 1005 /* Call wait ready function */
1005 status = chip->waitfunc(mtd, chip); 1006 status = chip->waitfunc(mtd, chip);
1006 /* See if device thinks it succeeded */ 1007 /* See if device thinks it succeeded */
1007 if (status & 0x01) { 1008 if (status & NAND_STATUS_FAIL) {
1008 pr_debug("%s: error status = 0x%08x\n", 1009 pr_debug("%s: error status = 0x%08x\n",
1009 __func__, status); 1010 __func__, status);
1010 ret = -EIO; 1011 ret = -EIO;
@@ -1014,6 +1015,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1014 ret = __nand_unlock(mtd, ofs, len, 0x1); 1015 ret = __nand_unlock(mtd, ofs, len, 0x1);
1015 1016
1016out: 1017out:
1018 chip->select_chip(mtd, -1);
1017 nand_release_device(mtd); 1019 nand_release_device(mtd);
1018 1020
1019 return ret; 1021 return ret;
@@ -1550,6 +1552,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1550 chip->select_chip(mtd, chipnr); 1552 chip->select_chip(mtd, chipnr);
1551 } 1553 }
1552 } 1554 }
1555 chip->select_chip(mtd, -1);
1553 1556
1554 ops->retlen = ops->len - (size_t) readlen; 1557 ops->retlen = ops->len - (size_t) readlen;
1555 if (oob) 1558 if (oob)
@@ -1577,11 +1580,10 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1577static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, 1580static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1578 size_t *retlen, uint8_t *buf) 1581 size_t *retlen, uint8_t *buf)
1579{ 1582{
1580 struct nand_chip *chip = mtd->priv;
1581 struct mtd_oob_ops ops; 1583 struct mtd_oob_ops ops;
1582 int ret; 1584 int ret;
1583 1585
1584 nand_get_device(chip, mtd, FL_READING); 1586 nand_get_device(mtd, FL_READING);
1585 ops.len = len; 1587 ops.len = len;
1586 ops.datbuf = buf; 1588 ops.datbuf = buf;
1587 ops.oobbuf = NULL; 1589 ops.oobbuf = NULL;
@@ -1804,6 +1806,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1804 chip->select_chip(mtd, chipnr); 1806 chip->select_chip(mtd, chipnr);
1805 } 1807 }
1806 } 1808 }
1809 chip->select_chip(mtd, -1);
1807 1810
1808 ops->oobretlen = ops->ooblen - readlen; 1811 ops->oobretlen = ops->ooblen - readlen;
1809 1812
@@ -1827,7 +1830,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1827static int nand_read_oob(struct mtd_info *mtd, loff_t from, 1830static int nand_read_oob(struct mtd_info *mtd, loff_t from,
1828 struct mtd_oob_ops *ops) 1831 struct mtd_oob_ops *ops)
1829{ 1832{
1830 struct nand_chip *chip = mtd->priv;
1831 int ret = -ENOTSUPP; 1833 int ret = -ENOTSUPP;
1832 1834
1833 ops->retlen = 0; 1835 ops->retlen = 0;
@@ -1839,7 +1841,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
1839 return -EINVAL; 1841 return -EINVAL;
1840 } 1842 }
1841 1843
1842 nand_get_device(chip, mtd, FL_READING); 1844 nand_get_device(mtd, FL_READING);
1843 1845
1844 switch (ops->mode) { 1846 switch (ops->mode) {
1845 case MTD_OPS_PLACE_OOB: 1847 case MTD_OPS_PLACE_OOB:
@@ -2186,8 +2188,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2186 chip->select_chip(mtd, chipnr); 2188 chip->select_chip(mtd, chipnr);
2187 2189
2188 /* Check, if it is write protected */ 2190 /* Check, if it is write protected */
2189 if (nand_check_wp(mtd)) 2191 if (nand_check_wp(mtd)) {
2190 return -EIO; 2192 ret = -EIO;
2193 goto err_out;
2194 }
2191 2195
2192 realpage = (int)(to >> chip->page_shift); 2196 realpage = (int)(to >> chip->page_shift);
2193 page = realpage & chip->pagemask; 2197 page = realpage & chip->pagemask;
@@ -2199,8 +2203,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2199 chip->pagebuf = -1; 2203 chip->pagebuf = -1;
2200 2204
2201 /* Don't allow multipage oob writes with offset */ 2205 /* Don't allow multipage oob writes with offset */
2202 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) 2206 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
2203 return -EINVAL; 2207 ret = -EINVAL;
2208 goto err_out;
2209 }
2204 2210
2205 while (1) { 2211 while (1) {
2206 int bytes = mtd->writesize; 2212 int bytes = mtd->writesize;
@@ -2251,6 +2257,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2251 ops->retlen = ops->len - writelen; 2257 ops->retlen = ops->len - writelen;
2252 if (unlikely(oob)) 2258 if (unlikely(oob))
2253 ops->oobretlen = ops->ooblen; 2259 ops->oobretlen = ops->ooblen;
2260
2261err_out:
2262 chip->select_chip(mtd, -1);
2254 return ret; 2263 return ret;
2255} 2264}
2256 2265
@@ -2302,11 +2311,10 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2302static int nand_write(struct mtd_info *mtd, loff_t to, size_t len, 2311static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2303 size_t *retlen, const uint8_t *buf) 2312 size_t *retlen, const uint8_t *buf)
2304{ 2313{
2305 struct nand_chip *chip = mtd->priv;
2306 struct mtd_oob_ops ops; 2314 struct mtd_oob_ops ops;
2307 int ret; 2315 int ret;
2308 2316
2309 nand_get_device(chip, mtd, FL_WRITING); 2317 nand_get_device(mtd, FL_WRITING);
2310 ops.len = len; 2318 ops.len = len;
2311 ops.datbuf = (uint8_t *)buf; 2319 ops.datbuf = (uint8_t *)buf;
2312 ops.oobbuf = NULL; 2320 ops.oobbuf = NULL;
@@ -2377,8 +2385,10 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2377 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 2385 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
2378 2386
2379 /* Check, if it is write protected */ 2387 /* Check, if it is write protected */
2380 if (nand_check_wp(mtd)) 2388 if (nand_check_wp(mtd)) {
2389 chip->select_chip(mtd, -1);
2381 return -EROFS; 2390 return -EROFS;
2391 }
2382 2392
2383 /* Invalidate the page cache, if we write to the cached page */ 2393 /* Invalidate the page cache, if we write to the cached page */
2384 if (page == chip->pagebuf) 2394 if (page == chip->pagebuf)
@@ -2391,6 +2401,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2391 else 2401 else
2392 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); 2402 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
2393 2403
2404 chip->select_chip(mtd, -1);
2405
2394 if (status) 2406 if (status)
2395 return status; 2407 return status;
2396 2408
@@ -2408,7 +2420,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2408static int nand_write_oob(struct mtd_info *mtd, loff_t to, 2420static int nand_write_oob(struct mtd_info *mtd, loff_t to,
2409 struct mtd_oob_ops *ops) 2421 struct mtd_oob_ops *ops)
2410{ 2422{
2411 struct nand_chip *chip = mtd->priv;
2412 int ret = -ENOTSUPP; 2423 int ret = -ENOTSUPP;
2413 2424
2414 ops->retlen = 0; 2425 ops->retlen = 0;
@@ -2420,7 +2431,7 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
2420 return -EINVAL; 2431 return -EINVAL;
2421 } 2432 }
2422 2433
2423 nand_get_device(chip, mtd, FL_WRITING); 2434 nand_get_device(mtd, FL_WRITING);
2424 2435
2425 switch (ops->mode) { 2436 switch (ops->mode) {
2426 case MTD_OPS_PLACE_OOB: 2437 case MTD_OPS_PLACE_OOB:
@@ -2513,7 +2524,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2513 return -EINVAL; 2524 return -EINVAL;
2514 2525
2515 /* Grab the lock and see if the device is available */ 2526 /* Grab the lock and see if the device is available */
2516 nand_get_device(chip, mtd, FL_ERASING); 2527 nand_get_device(mtd, FL_ERASING);
2517 2528
2518 /* Shift to get first page */ 2529 /* Shift to get first page */
2519 page = (int)(instr->addr >> chip->page_shift); 2530 page = (int)(instr->addr >> chip->page_shift);
@@ -2623,6 +2634,7 @@ erase_exit:
2623 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO; 2634 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
2624 2635
2625 /* Deselect and wake up anyone waiting on the device */ 2636 /* Deselect and wake up anyone waiting on the device */
2637 chip->select_chip(mtd, -1);
2626 nand_release_device(mtd); 2638 nand_release_device(mtd);
2627 2639
2628 /* Do call back function */ 2640 /* Do call back function */
@@ -2658,12 +2670,10 @@ erase_exit:
2658 */ 2670 */
2659static void nand_sync(struct mtd_info *mtd) 2671static void nand_sync(struct mtd_info *mtd)
2660{ 2672{
2661 struct nand_chip *chip = mtd->priv;
2662
2663 pr_debug("%s: called\n", __func__); 2673 pr_debug("%s: called\n", __func__);
2664 2674
2665 /* Grab the lock and see if the device is available */ 2675 /* Grab the lock and see if the device is available */
2666 nand_get_device(chip, mtd, FL_SYNCING); 2676 nand_get_device(mtd, FL_SYNCING);
2667 /* Release it and go back */ 2677 /* Release it and go back */
2668 nand_release_device(mtd); 2678 nand_release_device(mtd);
2669} 2679}
@@ -2749,9 +2759,7 @@ static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
2749 */ 2759 */
2750static int nand_suspend(struct mtd_info *mtd) 2760static int nand_suspend(struct mtd_info *mtd)
2751{ 2761{
2752 struct nand_chip *chip = mtd->priv; 2762 return nand_get_device(mtd, FL_PM_SUSPENDED);
2753
2754 return nand_get_device(chip, mtd, FL_PM_SUSPENDED);
2755} 2763}
2756 2764
2757/** 2765/**
@@ -2849,6 +2857,8 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2849 int i; 2857 int i;
2850 int val; 2858 int val;
2851 2859
2860 /* ONFI need to be probed in 8 bits mode */
2861 WARN_ON(chip->options & NAND_BUSWIDTH_16);
2852 /* Try ONFI for unknown chip or LP */ 2862 /* Try ONFI for unknown chip or LP */
2853 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1); 2863 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
2854 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || 2864 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
@@ -2913,7 +2923,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2913 * 2923 *
2914 * Check if an ID string is repeated within a given sequence of bytes at 2924 * Check if an ID string is repeated within a given sequence of bytes at
2915 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a 2925 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
2916 * period of 2). This is a helper function for nand_id_len(). Returns non-zero 2926 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
2917 * if the repetition has a period of @period; otherwise, returns zero. 2927 * if the repetition has a period of @period; otherwise, returns zero.
2918 */ 2928 */
2919static int nand_id_has_period(u8 *id_data, int arrlen, int period) 2929static int nand_id_has_period(u8 *id_data, int arrlen, int period)
@@ -3242,11 +3252,15 @@ ident_done:
3242 break; 3252 break;
3243 } 3253 }
3244 3254
3245 /* 3255 if (chip->options & NAND_BUSWIDTH_AUTO) {
3246 * Check, if buswidth is correct. Hardware drivers should set 3256 WARN_ON(chip->options & NAND_BUSWIDTH_16);
3247 * chip correct! 3257 chip->options |= busw;
3248 */ 3258 nand_set_defaults(chip, busw);
3249 if (busw != (chip->options & NAND_BUSWIDTH_16)) { 3259 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
3260 /*
3261 * Check, if buswidth is correct. Hardware drivers should set
3262 * chip correct!
3263 */
3250 pr_info("NAND device: Manufacturer ID:" 3264 pr_info("NAND device: Manufacturer ID:"
3251 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, 3265 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
3252 *dev_id, nand_manuf_ids[maf_idx].name, mtd->name); 3266 *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
@@ -3285,10 +3299,10 @@ ident_done:
3285 chip->cmdfunc = nand_command_lp; 3299 chip->cmdfunc = nand_command_lp;
3286 3300
3287 pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s)," 3301 pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
3288 " page size: %d, OOB size: %d\n", 3302 " %dMiB, page size: %d, OOB size: %d\n",
3289 *maf_id, *dev_id, nand_manuf_ids[maf_idx].name, 3303 *maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
3290 chip->onfi_version ? chip->onfi_params.model : type->name, 3304 chip->onfi_version ? chip->onfi_params.model : type->name,
3291 mtd->writesize, mtd->oobsize); 3305 (int)(chip->chipsize >> 20), mtd->writesize, mtd->oobsize);
3292 3306
3293 return type; 3307 return type;
3294} 3308}
@@ -3327,6 +3341,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3327 return PTR_ERR(type); 3341 return PTR_ERR(type);
3328 } 3342 }
3329 3343
3344 chip->select_chip(mtd, -1);
3345
3330 /* Check for a chip array */ 3346 /* Check for a chip array */
3331 for (i = 1; i < maxchips; i++) { 3347 for (i = 1; i < maxchips; i++) {
3332 chip->select_chip(mtd, i); 3348 chip->select_chip(mtd, i);
@@ -3336,8 +3352,11 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3336 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 3352 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3337 /* Read manufacturer and device IDs */ 3353 /* Read manufacturer and device IDs */
3338 if (nand_maf_id != chip->read_byte(mtd) || 3354 if (nand_maf_id != chip->read_byte(mtd) ||
3339 nand_dev_id != chip->read_byte(mtd)) 3355 nand_dev_id != chip->read_byte(mtd)) {
3356 chip->select_chip(mtd, -1);
3340 break; 3357 break;
3358 }
3359 chip->select_chip(mtd, -1);
3341 } 3360 }
3342 if (i > 1) 3361 if (i > 1)
3343 pr_info("%d NAND chips detected\n", i); 3362 pr_info("%d NAND chips detected\n", i);
@@ -3596,9 +3615,6 @@ int nand_scan_tail(struct mtd_info *mtd)
3596 /* Initialize state */ 3615 /* Initialize state */
3597 chip->state = FL_READY; 3616 chip->state = FL_READY;
3598 3617
3599 /* De-select the device */
3600 chip->select_chip(mtd, -1);
3601
3602 /* Invalidate the pagebuffer reference */ 3618 /* Invalidate the pagebuffer reference */
3603 chip->pagebuf = -1; 3619 chip->pagebuf = -1;
3604 3620
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index c3c13e64a2f0..818b65c85d12 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -42,6 +42,8 @@
42#include <linux/sched.h> 42#include <linux/sched.h>
43#include <linux/fs.h> 43#include <linux/fs.h>
44#include <linux/pagemap.h> 44#include <linux/pagemap.h>
45#include <linux/seq_file.h>
46#include <linux/debugfs.h>
45 47
46/* Default simulator parameters values */ 48/* Default simulator parameters values */
47#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \ 49#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
@@ -105,7 +107,6 @@ static char *weakblocks = NULL;
105static char *weakpages = NULL; 107static char *weakpages = NULL;
106static unsigned int bitflips = 0; 108static unsigned int bitflips = 0;
107static char *gravepages = NULL; 109static char *gravepages = NULL;
108static unsigned int rptwear = 0;
109static unsigned int overridesize = 0; 110static unsigned int overridesize = 0;
110static char *cache_file = NULL; 111static char *cache_file = NULL;
111static unsigned int bbt; 112static unsigned int bbt;
@@ -130,7 +131,6 @@ module_param(weakblocks, charp, 0400);
130module_param(weakpages, charp, 0400); 131module_param(weakpages, charp, 0400);
131module_param(bitflips, uint, 0400); 132module_param(bitflips, uint, 0400);
132module_param(gravepages, charp, 0400); 133module_param(gravepages, charp, 0400);
133module_param(rptwear, uint, 0400);
134module_param(overridesize, uint, 0400); 134module_param(overridesize, uint, 0400);
135module_param(cache_file, charp, 0400); 135module_param(cache_file, charp, 0400);
136module_param(bbt, uint, 0400); 136module_param(bbt, uint, 0400);
@@ -162,7 +162,6 @@ MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (z
162MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]" 162MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
163 " separated by commas e.g. 1401:2 means page 1401" 163 " separated by commas e.g. 1401:2 means page 1401"
164 " can be read only twice before failing"); 164 " can be read only twice before failing");
165MODULE_PARM_DESC(rptwear, "Number of erases between reporting wear, if not zero");
166MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. " 165MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
167 "The size is specified in erase blocks and as the exponent of a power of two" 166 "The size is specified in erase blocks and as the exponent of a power of two"
168 " e.g. 5 means a size of 32 erase blocks"); 167 " e.g. 5 means a size of 32 erase blocks");
@@ -286,6 +285,11 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
286/* Maximum page cache pages needed to read or write a NAND page to the cache_file */ 285/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
287#define NS_MAX_HELD_PAGES 16 286#define NS_MAX_HELD_PAGES 16
288 287
288struct nandsim_debug_info {
289 struct dentry *dfs_root;
290 struct dentry *dfs_wear_report;
291};
292
289/* 293/*
290 * A union to represent flash memory contents and flash buffer. 294 * A union to represent flash memory contents and flash buffer.
291 */ 295 */
@@ -365,6 +369,8 @@ struct nandsim {
365 void *file_buf; 369 void *file_buf;
366 struct page *held_pages[NS_MAX_HELD_PAGES]; 370 struct page *held_pages[NS_MAX_HELD_PAGES];
367 int held_cnt; 371 int held_cnt;
372
373 struct nandsim_debug_info dbg;
368}; 374};
369 375
370/* 376/*
@@ -442,11 +448,123 @@ static LIST_HEAD(grave_pages);
442static unsigned long *erase_block_wear = NULL; 448static unsigned long *erase_block_wear = NULL;
443static unsigned int wear_eb_count = 0; 449static unsigned int wear_eb_count = 0;
444static unsigned long total_wear = 0; 450static unsigned long total_wear = 0;
445static unsigned int rptwear_cnt = 0;
446 451
447/* MTD structure for NAND controller */ 452/* MTD structure for NAND controller */
448static struct mtd_info *nsmtd; 453static struct mtd_info *nsmtd;
449 454
455static int nandsim_debugfs_show(struct seq_file *m, void *private)
456{
457 unsigned long wmin = -1, wmax = 0, avg;
458 unsigned long deciles[10], decile_max[10], tot = 0;
459 unsigned int i;
460
461 /* Calc wear stats */
462 for (i = 0; i < wear_eb_count; ++i) {
463 unsigned long wear = erase_block_wear[i];
464 if (wear < wmin)
465 wmin = wear;
466 if (wear > wmax)
467 wmax = wear;
468 tot += wear;
469 }
470
471 for (i = 0; i < 9; ++i) {
472 deciles[i] = 0;
473 decile_max[i] = (wmax * (i + 1) + 5) / 10;
474 }
475 deciles[9] = 0;
476 decile_max[9] = wmax;
477 for (i = 0; i < wear_eb_count; ++i) {
478 int d;
479 unsigned long wear = erase_block_wear[i];
480 for (d = 0; d < 10; ++d)
481 if (wear <= decile_max[d]) {
482 deciles[d] += 1;
483 break;
484 }
485 }
486 avg = tot / wear_eb_count;
487
488 /* Output wear report */
489 seq_printf(m, "Total numbers of erases: %lu\n", tot);
490 seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count);
491 seq_printf(m, "Average number of erases: %lu\n", avg);
492 seq_printf(m, "Maximum number of erases: %lu\n", wmax);
493 seq_printf(m, "Minimum number of erases: %lu\n", wmin);
494 for (i = 0; i < 10; ++i) {
495 unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
496 if (from > decile_max[i])
497 continue;
498 seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
499 from,
500 decile_max[i],
501 deciles[i]);
502 }
503
504 return 0;
505}
506
507static int nandsim_debugfs_open(struct inode *inode, struct file *file)
508{
509 return single_open(file, nandsim_debugfs_show, inode->i_private);
510}
511
512static const struct file_operations dfs_fops = {
513 .open = nandsim_debugfs_open,
514 .read = seq_read,
515 .llseek = seq_lseek,
516 .release = single_release,
517};
518
519/**
520 * nandsim_debugfs_create - initialize debugfs
521 * @dev: nandsim device description object
522 *
523 * This function creates all debugfs files for UBI device @ubi. Returns zero in
524 * case of success and a negative error code in case of failure.
525 */
526static int nandsim_debugfs_create(struct nandsim *dev)
527{
528 struct nandsim_debug_info *dbg = &dev->dbg;
529 struct dentry *dent;
530 int err;
531
532 if (!IS_ENABLED(CONFIG_DEBUG_FS))
533 return 0;
534
535 dent = debugfs_create_dir("nandsim", NULL);
536 if (IS_ERR_OR_NULL(dent)) {
537 int err = dent ? -ENODEV : PTR_ERR(dent);
538
539 NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n",
540 err);
541 return err;
542 }
543 dbg->dfs_root = dent;
544
545 dent = debugfs_create_file("wear_report", S_IRUSR,
546 dbg->dfs_root, dev, &dfs_fops);
547 if (IS_ERR_OR_NULL(dent))
548 goto out_remove;
549 dbg->dfs_wear_report = dent;
550
551 return 0;
552
553out_remove:
554 debugfs_remove_recursive(dbg->dfs_root);
555 err = dent ? PTR_ERR(dent) : -ENODEV;
556 return err;
557}
558
559/**
560 * nandsim_debugfs_remove - destroy all debugfs files
561 */
562static void nandsim_debugfs_remove(struct nandsim *ns)
563{
564 if (IS_ENABLED(CONFIG_DEBUG_FS))
565 debugfs_remove_recursive(ns->dbg.dfs_root);
566}
567
450/* 568/*
451 * Allocate array of page pointers, create slab allocation for an array 569 * Allocate array of page pointers, create slab allocation for an array
452 * and initialize the array by NULL pointers. 570 * and initialize the array by NULL pointers.
@@ -911,8 +1029,6 @@ static int setup_wear_reporting(struct mtd_info *mtd)
911{ 1029{
912 size_t mem; 1030 size_t mem;
913 1031
914 if (!rptwear)
915 return 0;
916 wear_eb_count = div_u64(mtd->size, mtd->erasesize); 1032 wear_eb_count = div_u64(mtd->size, mtd->erasesize);
917 mem = wear_eb_count * sizeof(unsigned long); 1033 mem = wear_eb_count * sizeof(unsigned long);
918 if (mem / sizeof(unsigned long) != wear_eb_count) { 1034 if (mem / sizeof(unsigned long) != wear_eb_count) {
@@ -929,64 +1045,18 @@ static int setup_wear_reporting(struct mtd_info *mtd)
929 1045
930static void update_wear(unsigned int erase_block_no) 1046static void update_wear(unsigned int erase_block_no)
931{ 1047{
932 unsigned long wmin = -1, wmax = 0, avg;
933 unsigned long deciles[10], decile_max[10], tot = 0;
934 unsigned int i;
935
936 if (!erase_block_wear) 1048 if (!erase_block_wear)
937 return; 1049 return;
938 total_wear += 1; 1050 total_wear += 1;
1051 /*
1052 * TODO: Notify this through a debugfs entry,
1053 * instead of showing an error message.
1054 */
939 if (total_wear == 0) 1055 if (total_wear == 0)
940 NS_ERR("Erase counter total overflow\n"); 1056 NS_ERR("Erase counter total overflow\n");
941 erase_block_wear[erase_block_no] += 1; 1057 erase_block_wear[erase_block_no] += 1;
942 if (erase_block_wear[erase_block_no] == 0) 1058 if (erase_block_wear[erase_block_no] == 0)
943 NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no); 1059 NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
944 rptwear_cnt += 1;
945 if (rptwear_cnt < rptwear)
946 return;
947 rptwear_cnt = 0;
948 /* Calc wear stats */
949 for (i = 0; i < wear_eb_count; ++i) {
950 unsigned long wear = erase_block_wear[i];
951 if (wear < wmin)
952 wmin = wear;
953 if (wear > wmax)
954 wmax = wear;
955 tot += wear;
956 }
957 for (i = 0; i < 9; ++i) {
958 deciles[i] = 0;
959 decile_max[i] = (wmax * (i + 1) + 5) / 10;
960 }
961 deciles[9] = 0;
962 decile_max[9] = wmax;
963 for (i = 0; i < wear_eb_count; ++i) {
964 int d;
965 unsigned long wear = erase_block_wear[i];
966 for (d = 0; d < 10; ++d)
967 if (wear <= decile_max[d]) {
968 deciles[d] += 1;
969 break;
970 }
971 }
972 avg = tot / wear_eb_count;
973 /* Output wear report */
974 NS_INFO("*** Wear Report ***\n");
975 NS_INFO("Total numbers of erases: %lu\n", tot);
976 NS_INFO("Number of erase blocks: %u\n", wear_eb_count);
977 NS_INFO("Average number of erases: %lu\n", avg);
978 NS_INFO("Maximum number of erases: %lu\n", wmax);
979 NS_INFO("Minimum number of erases: %lu\n", wmin);
980 for (i = 0; i < 10; ++i) {
981 unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
982 if (from > decile_max[i])
983 continue;
984 NS_INFO("Number of ebs with erase counts from %lu to %lu : %lu\n",
985 from,
986 decile_max[i],
987 deciles[i]);
988 }
989 NS_INFO("*** End of Wear Report ***\n");
990} 1060}
991 1061
992/* 1062/*
@@ -2327,6 +2397,9 @@ static int __init ns_init_module(void)
2327 if ((retval = setup_wear_reporting(nsmtd)) != 0) 2397 if ((retval = setup_wear_reporting(nsmtd)) != 0)
2328 goto err_exit; 2398 goto err_exit;
2329 2399
2400 if ((retval = nandsim_debugfs_create(nand)) != 0)
2401 goto err_exit;
2402
2330 if ((retval = init_nandsim(nsmtd)) != 0) 2403 if ((retval = init_nandsim(nsmtd)) != 0)
2331 goto err_exit; 2404 goto err_exit;
2332 2405
@@ -2366,6 +2439,7 @@ static void __exit ns_cleanup_module(void)
2366 struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv; 2439 struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
2367 int i; 2440 int i;
2368 2441
2442 nandsim_debugfs_remove(ns);
2369 free_nandsim(ns); /* Free nandsim private resources */ 2443 free_nandsim(ns); /* Free nandsim private resources */
2370 nand_release(nsmtd); /* Unregister driver */ 2444 nand_release(nsmtd); /* Unregister driver */
2371 for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) 2445 for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 5fd3f010e3ae..8e148f1478fd 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -197,7 +197,7 @@ err:
197 return ret; 197 return ret;
198} 198}
199 199
200static int __devinit ndfc_probe(struct platform_device *ofdev) 200static int ndfc_probe(struct platform_device *ofdev)
201{ 201{
202 struct ndfc_controller *ndfc; 202 struct ndfc_controller *ndfc;
203 const __be32 *reg; 203 const __be32 *reg;
@@ -256,7 +256,7 @@ static int __devinit ndfc_probe(struct platform_device *ofdev)
256 return 0; 256 return 0;
257} 257}
258 258
259static int __devexit ndfc_remove(struct platform_device *ofdev) 259static int ndfc_remove(struct platform_device *ofdev)
260{ 260{
261 struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); 261 struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
262 262
@@ -279,7 +279,7 @@ static struct platform_driver ndfc_driver = {
279 .of_match_table = ndfc_match, 279 .of_match_table = ndfc_match,
280 }, 280 },
281 .probe = ndfc_probe, 281 .probe = ndfc_probe,
282 .remove = __devexit_p(ndfc_remove), 282 .remove = ndfc_remove,
283}; 283};
284 284
285module_platform_driver(ndfc_driver); 285module_platform_driver(ndfc_driver);
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
deleted file mode 100644
index 9ee0c4edfacf..000000000000
--- a/drivers/mtd/nand/nomadik_nand.c
+++ /dev/null
@@ -1,235 +0,0 @@
1/*
2 * drivers/mtd/nand/nomadik_nand.c
3 *
4 * Overview:
5 * Driver for on-board NAND flash on Nomadik Platforms
6 *
7 * Copyright © 2007 STMicroelectronics Pvt. Ltd.
8 * Author: Sachin Verma <sachin.verma@st.com>
9 *
10 * Copyright © 2009 Alessandro Rubini
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 */
23
24#include <linux/init.h>
25#include <linux/module.h>
26#include <linux/types.h>
27#include <linux/mtd/mtd.h>
28#include <linux/mtd/nand.h>
29#include <linux/mtd/nand_ecc.h>
30#include <linux/platform_device.h>
31#include <linux/mtd/partitions.h>
32#include <linux/io.h>
33#include <linux/slab.h>
34#include <linux/platform_data/mtd-nomadik-nand.h>
35#include <mach/fsmc.h>
36
37#include <mtd/mtd-abi.h>
38
39struct nomadik_nand_host {
40 struct mtd_info mtd;
41 struct nand_chip nand;
42 void __iomem *data_va;
43 void __iomem *cmd_va;
44 void __iomem *addr_va;
45 struct nand_bbt_descr *bbt_desc;
46};
47
48static struct nand_ecclayout nomadik_ecc_layout = {
49 .eccbytes = 3 * 4,
50 .eccpos = { /* each subpage has 16 bytes: pos 2,3,4 hosts ECC */
51 0x02, 0x03, 0x04,
52 0x12, 0x13, 0x14,
53 0x22, 0x23, 0x24,
54 0x32, 0x33, 0x34},
55 /* let's keep bytes 5,6,7 for us, just in case we change ECC algo */
56 .oobfree = { {0x08, 0x08}, {0x18, 0x08}, {0x28, 0x08}, {0x38, 0x08} },
57};
58
59static void nomadik_ecc_control(struct mtd_info *mtd, int mode)
60{
61 /* No need to enable hw ecc, it's on by default */
62}
63
64static void nomadik_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
65{
66 struct nand_chip *nand = mtd->priv;
67 struct nomadik_nand_host *host = nand->priv;
68
69 if (cmd == NAND_CMD_NONE)
70 return;
71
72 if (ctrl & NAND_CLE)
73 writeb(cmd, host->cmd_va);
74 else
75 writeb(cmd, host->addr_va);
76}
77
78static int nomadik_nand_probe(struct platform_device *pdev)
79{
80 struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
81 struct nomadik_nand_host *host;
82 struct mtd_info *mtd;
83 struct nand_chip *nand;
84 struct resource *res;
85 int ret = 0;
86
87 /* Allocate memory for the device structure (and zero it) */
88 host = kzalloc(sizeof(struct nomadik_nand_host), GFP_KERNEL);
89 if (!host) {
90 dev_err(&pdev->dev, "Failed to allocate device structure.\n");
91 return -ENOMEM;
92 }
93
94 /* Call the client's init function, if any */
95 if (pdata->init)
96 ret = pdata->init();
97 if (ret < 0) {
98 dev_err(&pdev->dev, "Init function failed\n");
99 goto err;
100 }
101
102 /* ioremap three regions */
103 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
104 if (!res) {
105 ret = -EIO;
106 goto err_unmap;
107 }
108 host->addr_va = ioremap(res->start, resource_size(res));
109
110 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
111 if (!res) {
112 ret = -EIO;
113 goto err_unmap;
114 }
115 host->data_va = ioremap(res->start, resource_size(res));
116
117 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
118 if (!res) {
119 ret = -EIO;
120 goto err_unmap;
121 }
122 host->cmd_va = ioremap(res->start, resource_size(res));
123
124 if (!host->addr_va || !host->data_va || !host->cmd_va) {
125 ret = -ENOMEM;
126 goto err_unmap;
127 }
128
129 /* Link all private pointers */
130 mtd = &host->mtd;
131 nand = &host->nand;
132 mtd->priv = nand;
133 nand->priv = host;
134
135 host->mtd.owner = THIS_MODULE;
136 nand->IO_ADDR_R = host->data_va;
137 nand->IO_ADDR_W = host->data_va;
138 nand->cmd_ctrl = nomadik_cmd_ctrl;
139
140 /*
141 * This stanza declares ECC_HW but uses soft routines. It's because
142 * HW claims to make the calculation but not the correction. However,
143 * I haven't managed to get the desired data out of it until now.
144 */
145 nand->ecc.mode = NAND_ECC_SOFT;
146 nand->ecc.layout = &nomadik_ecc_layout;
147 nand->ecc.hwctl = nomadik_ecc_control;
148 nand->ecc.size = 512;
149 nand->ecc.bytes = 3;
150
151 nand->options = pdata->options;
152
153 /*
154 * Scan to find existence of the device
155 */
156 if (nand_scan(&host->mtd, 1)) {
157 ret = -ENXIO;
158 goto err_unmap;
159 }
160
161 mtd_device_register(&host->mtd, pdata->parts, pdata->nparts);
162
163 platform_set_drvdata(pdev, host);
164 return 0;
165
166 err_unmap:
167 if (host->cmd_va)
168 iounmap(host->cmd_va);
169 if (host->data_va)
170 iounmap(host->data_va);
171 if (host->addr_va)
172 iounmap(host->addr_va);
173 err:
174 kfree(host);
175 return ret;
176}
177
178/*
179 * Clean up routine
180 */
181static int nomadik_nand_remove(struct platform_device *pdev)
182{
183 struct nomadik_nand_host *host = platform_get_drvdata(pdev);
184 struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
185
186 if (pdata->exit)
187 pdata->exit();
188
189 if (host) {
190 nand_release(&host->mtd);
191 iounmap(host->cmd_va);
192 iounmap(host->data_va);
193 iounmap(host->addr_va);
194 kfree(host);
195 }
196 return 0;
197}
198
199static int nomadik_nand_suspend(struct device *dev)
200{
201 struct nomadik_nand_host *host = dev_get_drvdata(dev);
202 int ret = 0;
203 if (host)
204 ret = mtd_suspend(&host->mtd);
205 return ret;
206}
207
208static int nomadik_nand_resume(struct device *dev)
209{
210 struct nomadik_nand_host *host = dev_get_drvdata(dev);
211 if (host)
212 mtd_resume(&host->mtd);
213 return 0;
214}
215
216static const struct dev_pm_ops nomadik_nand_pm_ops = {
217 .suspend = nomadik_nand_suspend,
218 .resume = nomadik_nand_resume,
219};
220
221static struct platform_driver nomadik_nand_driver = {
222 .probe = nomadik_nand_probe,
223 .remove = nomadik_nand_remove,
224 .driver = {
225 .owner = THIS_MODULE,
226 .name = "nomadik_nand",
227 .pm = &nomadik_nand_pm_ops,
228 },
229};
230
231module_platform_driver(nomadik_nand_driver);
232
233MODULE_LICENSE("GPL");
234MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)");
235MODULE_DESCRIPTION("NAND driver for Nomadik Platform");
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 94dc46bc118c..a6191198d259 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -246,7 +246,7 @@ static void nuc900_nand_enable(struct nuc900_nand *nand)
246 spin_unlock(&nand->lock); 246 spin_unlock(&nand->lock);
247} 247}
248 248
249static int __devinit nuc900_nand_probe(struct platform_device *pdev) 249static int nuc900_nand_probe(struct platform_device *pdev)
250{ 250{
251 struct nuc900_nand *nuc900_nand; 251 struct nuc900_nand *nuc900_nand;
252 struct nand_chip *chip; 252 struct nand_chip *chip;
@@ -317,7 +317,7 @@ fail1: kfree(nuc900_nand);
317 return retval; 317 return retval;
318} 318}
319 319
320static int __devexit nuc900_nand_remove(struct platform_device *pdev) 320static int nuc900_nand_remove(struct platform_device *pdev)
321{ 321{
322 struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); 322 struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
323 struct resource *res; 323 struct resource *res;
@@ -340,7 +340,7 @@ static int __devexit nuc900_nand_remove(struct platform_device *pdev)
340 340
341static struct platform_driver nuc900_nand_driver = { 341static struct platform_driver nuc900_nand_driver = {
342 .probe = nuc900_nand_probe, 342 .probe = nuc900_nand_probe,
343 .remove = __devexit_p(nuc900_nand_remove), 343 .remove = nuc900_nand_remove,
344 .driver = { 344 .driver = {
345 .name = "nuc900-fmi", 345 .name = "nuc900-fmi",
346 .owner = THIS_MODULE, 346 .owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 1f34ba104ef4..0002d5e94f0d 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1323,7 +1323,7 @@ static void omap3_free_bch(struct mtd_info *mtd)
1323} 1323}
1324#endif /* CONFIG_MTD_NAND_OMAP_BCH */ 1324#endif /* CONFIG_MTD_NAND_OMAP_BCH */
1325 1325
1326static int __devinit omap_nand_probe(struct platform_device *pdev) 1326static int omap_nand_probe(struct platform_device *pdev)
1327{ 1327{
1328 struct omap_nand_info *info; 1328 struct omap_nand_info *info;
1329 struct omap_nand_platform_data *pdata; 1329 struct omap_nand_platform_data *pdata;
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index aefaf8cd31ef..cd72b9299f6b 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -194,7 +194,7 @@ no_res:
194 return ret; 194 return ret;
195} 195}
196 196
197static int __devexit orion_nand_remove(struct platform_device *pdev) 197static int orion_nand_remove(struct platform_device *pdev)
198{ 198{
199 struct mtd_info *mtd = platform_get_drvdata(pdev); 199 struct mtd_info *mtd = platform_get_drvdata(pdev);
200 struct nand_chip *nc = mtd->priv; 200 struct nand_chip *nc = mtd->priv;
@@ -223,7 +223,7 @@ static struct of_device_id orion_nand_of_match_table[] = {
223#endif 223#endif
224 224
225static struct platform_driver orion_nand_driver = { 225static struct platform_driver orion_nand_driver = {
226 .remove = __devexit_p(orion_nand_remove), 226 .remove = orion_nand_remove,
227 .driver = { 227 .driver = {
228 .name = "orion_nand", 228 .name = "orion_nand",
229 .owner = THIS_MODULE, 229 .owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 1440e51cedcc..5a67082c07ee 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -89,7 +89,7 @@ int pasemi_device_ready(struct mtd_info *mtd)
89 return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR); 89 return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
90} 90}
91 91
92static int __devinit pasemi_nand_probe(struct platform_device *ofdev) 92static int pasemi_nand_probe(struct platform_device *ofdev)
93{ 93{
94 struct pci_dev *pdev; 94 struct pci_dev *pdev;
95 struct device_node *np = ofdev->dev.of_node; 95 struct device_node *np = ofdev->dev.of_node;
@@ -184,7 +184,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
184 return err; 184 return err;
185} 185}
186 186
187static int __devexit pasemi_nand_remove(struct platform_device *ofdev) 187static int pasemi_nand_remove(struct platform_device *ofdev)
188{ 188{
189 struct nand_chip *chip; 189 struct nand_chip *chip;
190 190
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index a47ee68a0cfa..c004566a9ad2 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -28,7 +28,7 @@ static const char *part_probe_types[] = { "cmdlinepart", NULL };
28/* 28/*
29 * Probe for the NAND device. 29 * Probe for the NAND device.
30 */ 30 */
31static int __devinit plat_nand_probe(struct platform_device *pdev) 31static int plat_nand_probe(struct platform_device *pdev)
32{ 32{
33 struct platform_nand_data *pdata = pdev->dev.platform_data; 33 struct platform_nand_data *pdata = pdev->dev.platform_data;
34 struct mtd_part_parser_data ppdata; 34 struct mtd_part_parser_data ppdata;
@@ -134,7 +134,7 @@ out_free:
134/* 134/*
135 * Remove a NAND device. 135 * Remove a NAND device.
136 */ 136 */
137static int __devexit plat_nand_remove(struct platform_device *pdev) 137static int plat_nand_remove(struct platform_device *pdev)
138{ 138{
139 struct plat_nand_data *data = platform_get_drvdata(pdev); 139 struct plat_nand_data *data = platform_get_drvdata(pdev);
140 struct platform_nand_data *pdata = pdev->dev.platform_data; 140 struct platform_nand_data *pdata = pdev->dev.platform_data;
@@ -160,7 +160,7 @@ MODULE_DEVICE_TABLE(of, plat_nand_match);
160 160
161static struct platform_driver plat_nand_driver = { 161static struct platform_driver plat_nand_driver = {
162 .probe = plat_nand_probe, 162 .probe = plat_nand_probe,
163 .remove = __devexit_p(plat_nand_remove), 163 .remove = plat_nand_remove,
164 .driver = { 164 .driver = {
165 .name = "gen_nand", 165 .name = "gen_nand",
166 .owner = THIS_MODULE, 166 .owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 79ded48e7427..df954b4dcba2 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -730,11 +730,14 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
730 struct s3c2410_nand_mtd *mtd, 730 struct s3c2410_nand_mtd *mtd,
731 struct s3c2410_nand_set *set) 731 struct s3c2410_nand_set *set)
732{ 732{
733 if (set) 733 if (set) {
734 mtd->mtd.name = set->name; 734 mtd->mtd.name = set->name;
735 735
736 return mtd_device_parse_register(&mtd->mtd, NULL, NULL, 736 return mtd_device_parse_register(&mtd->mtd, NULL, NULL,
737 set->partitions, set->nr_partitions); 737 set->partitions, set->nr_partitions);
738 }
739
740 return -ENODEV;
738} 741}
739 742
740/** 743/**
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index f48ac5d80bbf..57b3971c9c0a 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -23,11 +23,18 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/completion.h>
26#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/dmaengine.h>
29#include <linux/dma-mapping.h>
27#include <linux/interrupt.h> 30#include <linux/interrupt.h>
28#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/of.h>
33#include <linux/of_device.h>
34#include <linux/of_mtd.h>
29#include <linux/platform_device.h> 35#include <linux/platform_device.h>
30#include <linux/pm_runtime.h> 36#include <linux/pm_runtime.h>
37#include <linux/sh_dma.h>
31#include <linux/slab.h> 38#include <linux/slab.h>
32#include <linux/string.h> 39#include <linux/string.h>
33 40
@@ -106,6 +113,84 @@ static void wait_completion(struct sh_flctl *flctl)
106 writeb(0x0, FLTRCR(flctl)); 113 writeb(0x0, FLTRCR(flctl));
107} 114}
108 115
116static void flctl_dma_complete(void *param)
117{
118 struct sh_flctl *flctl = param;
119
120 complete(&flctl->dma_complete);
121}
122
123static void flctl_release_dma(struct sh_flctl *flctl)
124{
125 if (flctl->chan_fifo0_rx) {
126 dma_release_channel(flctl->chan_fifo0_rx);
127 flctl->chan_fifo0_rx = NULL;
128 }
129 if (flctl->chan_fifo0_tx) {
130 dma_release_channel(flctl->chan_fifo0_tx);
131 flctl->chan_fifo0_tx = NULL;
132 }
133}
134
135static void flctl_setup_dma(struct sh_flctl *flctl)
136{
137 dma_cap_mask_t mask;
138 struct dma_slave_config cfg;
139 struct platform_device *pdev = flctl->pdev;
140 struct sh_flctl_platform_data *pdata = pdev->dev.platform_data;
141 int ret;
142
143 if (!pdata)
144 return;
145
146 if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
147 return;
148
149 /* We can only either use DMA for both Tx and Rx or not use it at all */
150 dma_cap_zero(mask);
151 dma_cap_set(DMA_SLAVE, mask);
152
153 flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
154 (void *)pdata->slave_id_fifo0_tx);
155 dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
156 flctl->chan_fifo0_tx);
157
158 if (!flctl->chan_fifo0_tx)
159 return;
160
161 memset(&cfg, 0, sizeof(cfg));
162 cfg.slave_id = pdata->slave_id_fifo0_tx;
163 cfg.direction = DMA_MEM_TO_DEV;
164 cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
165 cfg.src_addr = 0;
166 ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
167 if (ret < 0)
168 goto err;
169
170 flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
171 (void *)pdata->slave_id_fifo0_rx);
172 dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
173 flctl->chan_fifo0_rx);
174
175 if (!flctl->chan_fifo0_rx)
176 goto err;
177
178 cfg.slave_id = pdata->slave_id_fifo0_rx;
179 cfg.direction = DMA_DEV_TO_MEM;
180 cfg.dst_addr = 0;
181 cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
182 ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
183 if (ret < 0)
184 goto err;
185
186 init_completion(&flctl->dma_complete);
187
188 return;
189
190err:
191 flctl_release_dma(flctl);
192}
193
109static void set_addr(struct mtd_info *mtd, int column, int page_addr) 194static void set_addr(struct mtd_info *mtd, int column, int page_addr)
110{ 195{
111 struct sh_flctl *flctl = mtd_to_flctl(mtd); 196 struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -225,7 +310,7 @@ static enum flctl_ecc_res_t wait_recfifo_ready
225 310
226 for (i = 0; i < 3; i++) { 311 for (i = 0; i < 3; i++) {
227 uint8_t org; 312 uint8_t org;
228 int index; 313 unsigned int index;
229 314
230 data = readl(ecc_reg[i]); 315 data = readl(ecc_reg[i]);
231 316
@@ -261,6 +346,70 @@ static void wait_wecfifo_ready(struct sh_flctl *flctl)
261 timeout_error(flctl, __func__); 346 timeout_error(flctl, __func__);
262} 347}
263 348
349static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
350 int len, enum dma_data_direction dir)
351{
352 struct dma_async_tx_descriptor *desc = NULL;
353 struct dma_chan *chan;
354 enum dma_transfer_direction tr_dir;
355 dma_addr_t dma_addr;
356 dma_cookie_t cookie = -EINVAL;
357 uint32_t reg;
358 int ret;
359
360 if (dir == DMA_FROM_DEVICE) {
361 chan = flctl->chan_fifo0_rx;
362 tr_dir = DMA_DEV_TO_MEM;
363 } else {
364 chan = flctl->chan_fifo0_tx;
365 tr_dir = DMA_MEM_TO_DEV;
366 }
367
368 dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
369
370 if (dma_addr)
371 desc = dmaengine_prep_slave_single(chan, dma_addr, len,
372 tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
373
374 if (desc) {
375 reg = readl(FLINTDMACR(flctl));
376 reg |= DREQ0EN;
377 writel(reg, FLINTDMACR(flctl));
378
379 desc->callback = flctl_dma_complete;
380 desc->callback_param = flctl;
381 cookie = dmaengine_submit(desc);
382
383 dma_async_issue_pending(chan);
384 } else {
385 /* DMA failed, fall back to PIO */
386 flctl_release_dma(flctl);
387 dev_warn(&flctl->pdev->dev,
388 "DMA failed, falling back to PIO\n");
389 ret = -EIO;
390 goto out;
391 }
392
393 ret =
394 wait_for_completion_timeout(&flctl->dma_complete,
395 msecs_to_jiffies(3000));
396
397 if (ret <= 0) {
398 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
399 dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
400 }
401
402out:
403 reg = readl(FLINTDMACR(flctl));
404 reg &= ~DREQ0EN;
405 writel(reg, FLINTDMACR(flctl));
406
407 dma_unmap_single(chan->device->dev, dma_addr, len, dir);
408
409 /* ret > 0 is success */
410 return ret;
411}
412
264static void read_datareg(struct sh_flctl *flctl, int offset) 413static void read_datareg(struct sh_flctl *flctl, int offset)
265{ 414{
266 unsigned long data; 415 unsigned long data;
@@ -279,11 +428,20 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
279 428
280 len_4align = (rlen + 3) / 4; 429 len_4align = (rlen + 3) / 4;
281 430
431 /* initiate DMA transfer */
432 if (flctl->chan_fifo0_rx && rlen >= 32 &&
433 flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
434 goto convert; /* DMA success */
435
436 /* do polling transfer */
282 for (i = 0; i < len_4align; i++) { 437 for (i = 0; i < len_4align; i++) {
283 wait_rfifo_ready(flctl); 438 wait_rfifo_ready(flctl);
284 buf[i] = readl(FLDTFIFO(flctl)); 439 buf[i] = readl(FLDTFIFO(flctl));
285 buf[i] = be32_to_cpu(buf[i]);
286 } 440 }
441
442convert:
443 for (i = 0; i < len_4align; i++)
444 buf[i] = be32_to_cpu(buf[i]);
287} 445}
288 446
289static enum flctl_ecc_res_t read_ecfiforeg 447static enum flctl_ecc_res_t read_ecfiforeg
@@ -305,28 +463,39 @@ static enum flctl_ecc_res_t read_ecfiforeg
305 return res; 463 return res;
306} 464}
307 465
308static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset) 466static void write_fiforeg(struct sh_flctl *flctl, int rlen,
467 unsigned int offset)
309{ 468{
310 int i, len_4align; 469 int i, len_4align;
311 unsigned long *data = (unsigned long *)&flctl->done_buff[offset]; 470 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
312 void *fifo_addr = (void *)FLDTFIFO(flctl);
313 471
314 len_4align = (rlen + 3) / 4; 472 len_4align = (rlen + 3) / 4;
315 for (i = 0; i < len_4align; i++) { 473 for (i = 0; i < len_4align; i++) {
316 wait_wfifo_ready(flctl); 474 wait_wfifo_ready(flctl);
317 writel(cpu_to_be32(data[i]), fifo_addr); 475 writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
318 } 476 }
319} 477}
320 478
321static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, int offset) 479static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
480 unsigned int offset)
322{ 481{
323 int i, len_4align; 482 int i, len_4align;
324 unsigned long *data = (unsigned long *)&flctl->done_buff[offset]; 483 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
325 484
326 len_4align = (rlen + 3) / 4; 485 len_4align = (rlen + 3) / 4;
486
487 for (i = 0; i < len_4align; i++)
488 buf[i] = cpu_to_be32(buf[i]);
489
490 /* initiate DMA transfer */
491 if (flctl->chan_fifo0_tx && rlen >= 32 &&
492 flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
493 return; /* DMA success */
494
495 /* do polling transfer */
327 for (i = 0; i < len_4align; i++) { 496 for (i = 0; i < len_4align; i++) {
328 wait_wecfifo_ready(flctl); 497 wait_wecfifo_ready(flctl);
329 writel(cpu_to_be32(data[i]), FLECFIFO(flctl)); 498 writel(buf[i], FLECFIFO(flctl));
330 } 499 }
331} 500}
332 501
@@ -750,41 +919,35 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
750static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 919static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
751{ 920{
752 struct sh_flctl *flctl = mtd_to_flctl(mtd); 921 struct sh_flctl *flctl = mtd_to_flctl(mtd);
753 int index = flctl->index;
754 922
755 memcpy(&flctl->done_buff[index], buf, len); 923 memcpy(&flctl->done_buff[flctl->index], buf, len);
756 flctl->index += len; 924 flctl->index += len;
757} 925}
758 926
759static uint8_t flctl_read_byte(struct mtd_info *mtd) 927static uint8_t flctl_read_byte(struct mtd_info *mtd)
760{ 928{
761 struct sh_flctl *flctl = mtd_to_flctl(mtd); 929 struct sh_flctl *flctl = mtd_to_flctl(mtd);
762 int index = flctl->index;
763 uint8_t data; 930 uint8_t data;
764 931
765 data = flctl->done_buff[index]; 932 data = flctl->done_buff[flctl->index];
766 flctl->index++; 933 flctl->index++;
767 return data; 934 return data;
768} 935}
769 936
770static uint16_t flctl_read_word(struct mtd_info *mtd) 937static uint16_t flctl_read_word(struct mtd_info *mtd)
771{ 938{
772 struct sh_flctl *flctl = mtd_to_flctl(mtd); 939 struct sh_flctl *flctl = mtd_to_flctl(mtd);
773 int index = flctl->index; 940 uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];
774 uint16_t data;
775 uint16_t *buf = (uint16_t *)&flctl->done_buff[index];
776 941
777 data = *buf; 942 flctl->index += 2;
778 flctl->index += 2; 943 return *buf;
779 return data;
780} 944}
781 945
782static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 946static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
783{ 947{
784 struct sh_flctl *flctl = mtd_to_flctl(mtd); 948 struct sh_flctl *flctl = mtd_to_flctl(mtd);
785 int index = flctl->index;
786 949
787 memcpy(buf, &flctl->done_buff[index], len); 950 memcpy(buf, &flctl->done_buff[flctl->index], len);
788 flctl->index += len; 951 flctl->index += len;
789} 952}
790 953
@@ -858,7 +1021,74 @@ static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
858 return IRQ_HANDLED; 1021 return IRQ_HANDLED;
859} 1022}
860 1023
861static int __devinit flctl_probe(struct platform_device *pdev) 1024#ifdef CONFIG_OF
1025struct flctl_soc_config {
1026 unsigned long flcmncr_val;
1027 unsigned has_hwecc:1;
1028 unsigned use_holden:1;
1029};
1030
1031static struct flctl_soc_config flctl_sh7372_config = {
1032 .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
1033 .has_hwecc = 1,
1034 .use_holden = 1,
1035};
1036
1037static const struct of_device_id of_flctl_match[] = {
1038 { .compatible = "renesas,shmobile-flctl-sh7372",
1039 .data = &flctl_sh7372_config },
1040 {},
1041};
1042MODULE_DEVICE_TABLE(of, of_flctl_match);
1043
1044static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
1045{
1046 const struct of_device_id *match;
1047 struct flctl_soc_config *config;
1048 struct sh_flctl_platform_data *pdata;
1049 struct device_node *dn = dev->of_node;
1050 int ret;
1051
1052 match = of_match_device(of_flctl_match, dev);
1053 if (match)
1054 config = (struct flctl_soc_config *)match->data;
1055 else {
1056 dev_err(dev, "%s: no OF configuration attached\n", __func__);
1057 return NULL;
1058 }
1059
1060 pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
1061 GFP_KERNEL);
1062 if (!pdata) {
1063 dev_err(dev, "%s: failed to allocate config data\n", __func__);
1064 return NULL;
1065 }
1066
1067 /* set SoC specific options */
1068 pdata->flcmncr_val = config->flcmncr_val;
1069 pdata->has_hwecc = config->has_hwecc;
1070 pdata->use_holden = config->use_holden;
1071
1072 /* parse user defined options */
1073 ret = of_get_nand_bus_width(dn);
1074 if (ret == 16)
1075 pdata->flcmncr_val |= SEL_16BIT;
1076 else if (ret != 8) {
1077 dev_err(dev, "%s: invalid bus width\n", __func__);
1078 return NULL;
1079 }
1080
1081 return pdata;
1082}
1083#else /* CONFIG_OF */
1084#define of_flctl_match NULL
1085static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
1086{
1087 return NULL;
1088}
1089#endif /* CONFIG_OF */
1090
1091static int flctl_probe(struct platform_device *pdev)
862{ 1092{
863 struct resource *res; 1093 struct resource *res;
864 struct sh_flctl *flctl; 1094 struct sh_flctl *flctl;
@@ -867,12 +1097,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
867 struct sh_flctl_platform_data *pdata; 1097 struct sh_flctl_platform_data *pdata;
868 int ret = -ENXIO; 1098 int ret = -ENXIO;
869 int irq; 1099 int irq;
870 1100 struct mtd_part_parser_data ppdata = {};
871 pdata = pdev->dev.platform_data;
872 if (pdata == NULL) {
873 dev_err(&pdev->dev, "no platform data defined\n");
874 return -EINVAL;
875 }
876 1101
877 flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL); 1102 flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
878 if (!flctl) { 1103 if (!flctl) {
@@ -904,6 +1129,17 @@ static int __devinit flctl_probe(struct platform_device *pdev)
904 goto err_flste; 1129 goto err_flste;
905 } 1130 }
906 1131
1132 if (pdev->dev.of_node)
1133 pdata = flctl_parse_dt(&pdev->dev);
1134 else
1135 pdata = pdev->dev.platform_data;
1136
1137 if (!pdata) {
1138 dev_err(&pdev->dev, "no setup data defined\n");
1139 ret = -EINVAL;
1140 goto err_pdata;
1141 }
1142
907 platform_set_drvdata(pdev, flctl); 1143 platform_set_drvdata(pdev, flctl);
908 flctl_mtd = &flctl->mtd; 1144 flctl_mtd = &flctl->mtd;
909 nand = &flctl->chip; 1145 nand = &flctl->chip;
@@ -932,6 +1168,8 @@ static int __devinit flctl_probe(struct platform_device *pdev)
932 pm_runtime_enable(&pdev->dev); 1168 pm_runtime_enable(&pdev->dev);
933 pm_runtime_resume(&pdev->dev); 1169 pm_runtime_resume(&pdev->dev);
934 1170
1171 flctl_setup_dma(flctl);
1172
935 ret = nand_scan_ident(flctl_mtd, 1, NULL); 1173 ret = nand_scan_ident(flctl_mtd, 1, NULL);
936 if (ret) 1174 if (ret)
937 goto err_chip; 1175 goto err_chip;
@@ -944,12 +1182,16 @@ static int __devinit flctl_probe(struct platform_device *pdev)
944 if (ret) 1182 if (ret)
945 goto err_chip; 1183 goto err_chip;
946 1184
947 mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts); 1185 ppdata.of_node = pdev->dev.of_node;
1186 ret = mtd_device_parse_register(flctl_mtd, NULL, &ppdata, pdata->parts,
1187 pdata->nr_parts);
948 1188
949 return 0; 1189 return 0;
950 1190
951err_chip: 1191err_chip:
1192 flctl_release_dma(flctl);
952 pm_runtime_disable(&pdev->dev); 1193 pm_runtime_disable(&pdev->dev);
1194err_pdata:
953 free_irq(irq, flctl); 1195 free_irq(irq, flctl);
954err_flste: 1196err_flste:
955 iounmap(flctl->reg); 1197 iounmap(flctl->reg);
@@ -958,10 +1200,11 @@ err_iomap:
958 return ret; 1200 return ret;
959} 1201}
960 1202
961static int __devexit flctl_remove(struct platform_device *pdev) 1203static int flctl_remove(struct platform_device *pdev)
962{ 1204{
963 struct sh_flctl *flctl = platform_get_drvdata(pdev); 1205 struct sh_flctl *flctl = platform_get_drvdata(pdev);
964 1206
1207 flctl_release_dma(flctl);
965 nand_release(&flctl->mtd); 1208 nand_release(&flctl->mtd);
966 pm_runtime_disable(&pdev->dev); 1209 pm_runtime_disable(&pdev->dev);
967 free_irq(platform_get_irq(pdev, 0), flctl); 1210 free_irq(platform_get_irq(pdev, 0), flctl);
@@ -976,6 +1219,7 @@ static struct platform_driver flctl_driver = {
976 .driver = { 1219 .driver = {
977 .name = "sh_flctl", 1220 .name = "sh_flctl",
978 .owner = THIS_MODULE, 1221 .owner = THIS_MODULE,
1222 .of_match_table = of_flctl_match,
979 }, 1223 },
980}; 1224};
981 1225
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 3421e3762a5a..127bc4271821 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -106,7 +106,7 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat,
106/* 106/*
107 * Main initialization routine 107 * Main initialization routine
108 */ 108 */
109static int __devinit sharpsl_nand_probe(struct platform_device *pdev) 109static int sharpsl_nand_probe(struct platform_device *pdev)
110{ 110{
111 struct nand_chip *this; 111 struct nand_chip *this;
112 struct resource *r; 112 struct resource *r;
@@ -205,7 +205,7 @@ err_get_res:
205/* 205/*
206 * Clean up routine 206 * Clean up routine
207 */ 207 */
208static int __devexit sharpsl_nand_remove(struct platform_device *pdev) 208static int sharpsl_nand_remove(struct platform_device *pdev)
209{ 209{
210 struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev); 210 struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
211 211
@@ -228,7 +228,7 @@ static struct platform_driver sharpsl_nand_driver = {
228 .owner = THIS_MODULE, 228 .owner = THIS_MODULE,
229 }, 229 },
230 .probe = sharpsl_nand_probe, 230 .probe = sharpsl_nand_probe,
231 .remove = __devexit_p(sharpsl_nand_remove), 231 .remove = sharpsl_nand_remove,
232}; 232};
233 233
234module_platform_driver(sharpsl_nand_driver); 234module_platform_driver(sharpsl_nand_driver);
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index f3f28fafbf7a..09dde7d27178 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -140,7 +140,7 @@ static int socrates_nand_device_ready(struct mtd_info *mtd)
140/* 140/*
141 * Probe for the NAND device. 141 * Probe for the NAND device.
142 */ 142 */
143static int __devinit socrates_nand_probe(struct platform_device *ofdev) 143static int socrates_nand_probe(struct platform_device *ofdev)
144{ 144{
145 struct socrates_nand_host *host; 145 struct socrates_nand_host *host;
146 struct mtd_info *mtd; 146 struct mtd_info *mtd;
@@ -220,7 +220,7 @@ out:
220/* 220/*
221 * Remove a NAND device. 221 * Remove a NAND device.
222 */ 222 */
223static int __devexit socrates_nand_remove(struct platform_device *ofdev) 223static int socrates_nand_remove(struct platform_device *ofdev)
224{ 224{
225 struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev); 225 struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
226 struct mtd_info *mtd = &host->mtd; 226 struct mtd_info *mtd = &host->mtd;
@@ -251,7 +251,7 @@ static struct platform_driver socrates_nand_driver = {
251 .of_match_table = socrates_nand_match, 251 .of_match_table = socrates_nand_match,
252 }, 252 },
253 .probe = socrates_nand_probe, 253 .probe = socrates_nand_probe,
254 .remove = __devexit_p(socrates_nand_remove), 254 .remove = socrates_nand_remove,
255}; 255};
256 256
257module_platform_driver(socrates_nand_driver); 257module_platform_driver(socrates_nand_driver);
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index d9127e2ed808..dbd3aa574eaf 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -71,7 +71,10 @@ static int parse_ofpart_partitions(struct mtd_info *master,
71 (*pparts)[i].name = (char *)partname; 71 (*pparts)[i].name = (char *)partname;
72 72
73 if (of_get_property(pp, "read-only", &len)) 73 if (of_get_property(pp, "read-only", &len))
74 (*pparts)[i].mask_flags = MTD_WRITEABLE; 74 (*pparts)[i].mask_flags |= MTD_WRITEABLE;
75
76 if (of_get_property(pp, "lock", &len))
77 (*pparts)[i].mask_flags |= MTD_POWERUP_LOCK;
75 78
76 i++; 79 i++;
77 } 80 }
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 1c4f97c63e62..9f11562f849d 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -35,7 +35,7 @@ struct onenand_info {
35 struct onenand_chip onenand; 35 struct onenand_chip onenand;
36}; 36};
37 37
38static int __devinit generic_onenand_probe(struct platform_device *pdev) 38static int generic_onenand_probe(struct platform_device *pdev)
39{ 39{
40 struct onenand_info *info; 40 struct onenand_info *info;
41 struct onenand_platform_data *pdata = pdev->dev.platform_data; 41 struct onenand_platform_data *pdata = pdev->dev.platform_data;
@@ -88,7 +88,7 @@ out_free_info:
88 return err; 88 return err;
89} 89}
90 90
91static int __devexit generic_onenand_remove(struct platform_device *pdev) 91static int generic_onenand_remove(struct platform_device *pdev)
92{ 92{
93 struct onenand_info *info = platform_get_drvdata(pdev); 93 struct onenand_info *info = platform_get_drvdata(pdev);
94 struct resource *res = pdev->resource; 94 struct resource *res = pdev->resource;
@@ -112,7 +112,7 @@ static struct platform_driver generic_onenand_driver = {
112 .owner = THIS_MODULE, 112 .owner = THIS_MODULE,
113 }, 113 },
114 .probe = generic_onenand_probe, 114 .probe = generic_onenand_probe,
115 .remove = __devexit_p(generic_onenand_remove), 115 .remove = generic_onenand_remove,
116}; 116};
117 117
118module_platform_driver(generic_onenand_driver); 118module_platform_driver(generic_onenand_driver);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 00cd3da29435..065f3fe02a2f 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -630,7 +630,7 @@ static int omap2_onenand_disable(struct mtd_info *mtd)
630 return ret; 630 return ret;
631} 631}
632 632
633static int __devinit omap2_onenand_probe(struct platform_device *pdev) 633static int omap2_onenand_probe(struct platform_device *pdev)
634{ 634{
635 struct omap_onenand_platform_data *pdata; 635 struct omap_onenand_platform_data *pdata;
636 struct omap2_onenand *c; 636 struct omap2_onenand *c;
@@ -799,7 +799,7 @@ err_kfree:
799 return r; 799 return r;
800} 800}
801 801
802static int __devexit omap2_onenand_remove(struct platform_device *pdev) 802static int omap2_onenand_remove(struct platform_device *pdev)
803{ 803{
804 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev); 804 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
805 805
@@ -822,7 +822,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
822 822
823static struct platform_driver omap2_onenand_driver = { 823static struct platform_driver omap2_onenand_driver = {
824 .probe = omap2_onenand_probe, 824 .probe = omap2_onenand_probe,
825 .remove = __devexit_p(omap2_onenand_remove), 825 .remove = omap2_onenand_remove,
826 .shutdown = omap2_onenand_shutdown, 826 .shutdown = omap2_onenand_shutdown,
827 .driver = { 827 .driver = {
828 .name = DRIVER_NAME, 828 .name = DRIVER_NAME,
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 8e4b3f2742ba..33f2a8fb8df9 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -1053,7 +1053,7 @@ onenand_fail:
1053 return err; 1053 return err;
1054} 1054}
1055 1055
1056static int __devexit s3c_onenand_remove(struct platform_device *pdev) 1056static int s3c_onenand_remove(struct platform_device *pdev)
1057{ 1057{
1058 struct mtd_info *mtd = platform_get_drvdata(pdev); 1058 struct mtd_info *mtd = platform_get_drvdata(pdev);
1059 1059
@@ -1130,7 +1130,7 @@ static struct platform_driver s3c_onenand_driver = {
1130 }, 1130 },
1131 .id_table = s3c_onenand_driver_ids, 1131 .id_table = s3c_onenand_driver_ids,
1132 .probe = s3c_onenand_probe, 1132 .probe = s3c_onenand_probe,
1133 .remove = __devexit_p(s3c_onenand_remove), 1133 .remove = s3c_onenand_remove,
1134}; 1134};
1135 1135
1136module_platform_driver(s3c_onenand_driver); 1136module_platform_driver(s3c_onenand_driver);
diff --git a/drivers/mtd/tests/mtd_nandbiterrs.c b/drivers/mtd/tests/mtd_nandbiterrs.c
index cc8d62cb280c..207bf9a9972f 100644
--- a/drivers/mtd/tests/mtd_nandbiterrs.c
+++ b/drivers/mtd/tests/mtd_nandbiterrs.c
@@ -39,6 +39,9 @@
39 * this program; see the file COPYING. If not, write to the Free Software 39 * this program; see the file COPYING. If not, write to the Free Software
40 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 40 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
41 */ 41 */
42
43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
42#include <linux/init.h> 45#include <linux/init.h>
43#include <linux/module.h> 46#include <linux/module.h>
44#include <linux/moduleparam.h> 47#include <linux/moduleparam.h>
@@ -47,8 +50,6 @@
47#include <linux/mtd/nand.h> 50#include <linux/mtd/nand.h>
48#include <linux/slab.h> 51#include <linux/slab.h>
49 52
50#define msg(FMT, VA...) pr_info("mtd_nandbiterrs: "FMT, ##VA)
51
52static int dev; 53static int dev;
53module_param(dev, int, S_IRUGO); 54module_param(dev, int, S_IRUGO);
54MODULE_PARM_DESC(dev, "MTD device number to use"); 55MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -103,7 +104,7 @@ static int erase_block(void)
103 struct erase_info ei; 104 struct erase_info ei;
104 loff_t addr = eraseblock * mtd->erasesize; 105 loff_t addr = eraseblock * mtd->erasesize;
105 106
106 msg("erase_block\n"); 107 pr_info("erase_block\n");
107 108
108 memset(&ei, 0, sizeof(struct erase_info)); 109 memset(&ei, 0, sizeof(struct erase_info));
109 ei.mtd = mtd; 110 ei.mtd = mtd;
@@ -112,7 +113,7 @@ static int erase_block(void)
112 113
113 err = mtd_erase(mtd, &ei); 114 err = mtd_erase(mtd, &ei);
114 if (err || ei.state == MTD_ERASE_FAILED) { 115 if (err || ei.state == MTD_ERASE_FAILED) {
115 msg("error %d while erasing\n", err); 116 pr_err("error %d while erasing\n", err);
116 if (!err) 117 if (!err)
117 err = -EIO; 118 err = -EIO;
118 return err; 119 return err;
@@ -128,11 +129,11 @@ static int write_page(int log)
128 size_t written; 129 size_t written;
129 130
130 if (log) 131 if (log)
131 msg("write_page\n"); 132 pr_info("write_page\n");
132 133
133 err = mtd_write(mtd, offset, mtd->writesize, &written, wbuffer); 134 err = mtd_write(mtd, offset, mtd->writesize, &written, wbuffer);
134 if (err || written != mtd->writesize) { 135 if (err || written != mtd->writesize) {
135 msg("error: write failed at %#llx\n", (long long)offset); 136 pr_err("error: write failed at %#llx\n", (long long)offset);
136 if (!err) 137 if (!err)
137 err = -EIO; 138 err = -EIO;
138 } 139 }
@@ -147,7 +148,7 @@ static int rewrite_page(int log)
147 struct mtd_oob_ops ops; 148 struct mtd_oob_ops ops;
148 149
149 if (log) 150 if (log)
150 msg("rewrite page\n"); 151 pr_info("rewrite page\n");
151 152
152 ops.mode = MTD_OPS_RAW; /* No ECC */ 153 ops.mode = MTD_OPS_RAW; /* No ECC */
153 ops.len = mtd->writesize; 154 ops.len = mtd->writesize;
@@ -160,7 +161,7 @@ static int rewrite_page(int log)
160 161
161 err = mtd_write_oob(mtd, offset, &ops); 162 err = mtd_write_oob(mtd, offset, &ops);
162 if (err || ops.retlen != mtd->writesize) { 163 if (err || ops.retlen != mtd->writesize) {
163 msg("error: write_oob failed (%d)\n", err); 164 pr_err("error: write_oob failed (%d)\n", err);
164 if (!err) 165 if (!err)
165 err = -EIO; 166 err = -EIO;
166 } 167 }
@@ -177,7 +178,7 @@ static int read_page(int log)
177 struct mtd_ecc_stats oldstats; 178 struct mtd_ecc_stats oldstats;
178 179
179 if (log) 180 if (log)
180 msg("read_page\n"); 181 pr_info("read_page\n");
181 182
182 /* Saving last mtd stats */ 183 /* Saving last mtd stats */
183 memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats)); 184 memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
@@ -187,7 +188,7 @@ static int read_page(int log)
187 err = mtd->ecc_stats.corrected - oldstats.corrected; 188 err = mtd->ecc_stats.corrected - oldstats.corrected;
188 189
189 if (err < 0 || read != mtd->writesize) { 190 if (err < 0 || read != mtd->writesize) {
190 msg("error: read failed at %#llx\n", (long long)offset); 191 pr_err("error: read failed at %#llx\n", (long long)offset);
191 if (err >= 0) 192 if (err >= 0)
192 err = -EIO; 193 err = -EIO;
193 } 194 }
@@ -201,11 +202,11 @@ static int verify_page(int log)
201 unsigned i, errs = 0; 202 unsigned i, errs = 0;
202 203
203 if (log) 204 if (log)
204 msg("verify_page\n"); 205 pr_info("verify_page\n");
205 206
206 for (i = 0; i < mtd->writesize; i++) { 207 for (i = 0; i < mtd->writesize; i++) {
207 if (rbuffer[i] != hash(i+seed)) { 208 if (rbuffer[i] != hash(i+seed)) {
208 msg("Error: page offset %u, expected %02x, got %02x\n", 209 pr_err("Error: page offset %u, expected %02x, got %02x\n",
209 i, hash(i+seed), rbuffer[i]); 210 i, hash(i+seed), rbuffer[i]);
210 errs++; 211 errs++;
211 } 212 }
@@ -230,13 +231,13 @@ static int insert_biterror(unsigned byte)
230 for (bit = 7; bit >= 0; bit--) { 231 for (bit = 7; bit >= 0; bit--) {
231 if (CBIT(wbuffer[byte], bit)) { 232 if (CBIT(wbuffer[byte], bit)) {
232 BCLR(wbuffer[byte], bit); 233 BCLR(wbuffer[byte], bit);
233 msg("Inserted biterror @ %u/%u\n", byte, bit); 234 pr_info("Inserted biterror @ %u/%u\n", byte, bit);
234 return 0; 235 return 0;
235 } 236 }
236 } 237 }
237 byte++; 238 byte++;
238 } 239 }
239 msg("biterror: Failed to find a '1' bit\n"); 240 pr_err("biterror: Failed to find a '1' bit\n");
240 return -EIO; 241 return -EIO;
241} 242}
242 243
@@ -248,7 +249,7 @@ static int incremental_errors_test(void)
248 unsigned i; 249 unsigned i;
249 unsigned errs_per_subpage = 0; 250 unsigned errs_per_subpage = 0;
250 251
251 msg("incremental biterrors test\n"); 252 pr_info("incremental biterrors test\n");
252 253
253 for (i = 0; i < mtd->writesize; i++) 254 for (i = 0; i < mtd->writesize; i++)
254 wbuffer[i] = hash(i+seed); 255 wbuffer[i] = hash(i+seed);
@@ -265,9 +266,9 @@ static int incremental_errors_test(void)
265 266
266 err = read_page(1); 267 err = read_page(1);
267 if (err > 0) 268 if (err > 0)
268 msg("Read reported %d corrected bit errors\n", err); 269 pr_info("Read reported %d corrected bit errors\n", err);
269 if (err < 0) { 270 if (err < 0) {
270 msg("After %d biterrors per subpage, read reported error %d\n", 271 pr_err("After %d biterrors per subpage, read reported error %d\n",
271 errs_per_subpage, err); 272 errs_per_subpage, err);
272 err = 0; 273 err = 0;
273 goto exit; 274 goto exit;
@@ -275,11 +276,11 @@ static int incremental_errors_test(void)
275 276
276 err = verify_page(1); 277 err = verify_page(1);
277 if (err) { 278 if (err) {
278 msg("ECC failure, read data is incorrect despite read success\n"); 279 pr_err("ECC failure, read data is incorrect despite read success\n");
279 goto exit; 280 goto exit;
280 } 281 }
281 282
282 msg("Successfully corrected %d bit errors per subpage\n", 283 pr_info("Successfully corrected %d bit errors per subpage\n",
283 errs_per_subpage); 284 errs_per_subpage);
284 285
285 for (i = 0; i < subcount; i++) { 286 for (i = 0; i < subcount; i++) {
@@ -311,7 +312,7 @@ static int overwrite_test(void)
311 312
312 memset(bitstats, 0, sizeof(bitstats)); 313 memset(bitstats, 0, sizeof(bitstats));
313 314
314 msg("overwrite biterrors test\n"); 315 pr_info("overwrite biterrors test\n");
315 316
316 for (i = 0; i < mtd->writesize; i++) 317 for (i = 0; i < mtd->writesize; i++)
317 wbuffer[i] = hash(i+seed); 318 wbuffer[i] = hash(i+seed);
@@ -329,18 +330,18 @@ static int overwrite_test(void)
329 err = read_page(0); 330 err = read_page(0);
330 if (err >= 0) { 331 if (err >= 0) {
331 if (err >= MAXBITS) { 332 if (err >= MAXBITS) {
332 msg("Implausible number of bit errors corrected\n"); 333 pr_info("Implausible number of bit errors corrected\n");
333 err = -EIO; 334 err = -EIO;
334 break; 335 break;
335 } 336 }
336 bitstats[err]++; 337 bitstats[err]++;
337 if (err > max_corrected) { 338 if (err > max_corrected) {
338 max_corrected = err; 339 max_corrected = err;
339 msg("Read reported %d corrected bit errors\n", 340 pr_info("Read reported %d corrected bit errors\n",
340 err); 341 err);
341 } 342 }
342 } else { /* err < 0 */ 343 } else { /* err < 0 */
343 msg("Read reported error %d\n", err); 344 pr_info("Read reported error %d\n", err);
344 err = 0; 345 err = 0;
345 break; 346 break;
346 } 347 }
@@ -348,7 +349,7 @@ static int overwrite_test(void)
348 err = verify_page(0); 349 err = verify_page(0);
349 if (err) { 350 if (err) {
350 bitstats[max_corrected] = opno; 351 bitstats[max_corrected] = opno;
351 msg("ECC failure, read data is incorrect despite read success\n"); 352 pr_info("ECC failure, read data is incorrect despite read success\n");
352 break; 353 break;
353 } 354 }
354 355
@@ -357,9 +358,9 @@ static int overwrite_test(void)
357 358
358 /* At this point bitstats[0] contains the number of ops with no bit 359 /* At this point bitstats[0] contains the number of ops with no bit
359 * errors, bitstats[1] the number of ops with 1 bit error, etc. */ 360 * errors, bitstats[1] the number of ops with 1 bit error, etc. */
360 msg("Bit error histogram (%d operations total):\n", opno); 361 pr_info("Bit error histogram (%d operations total):\n", opno);
361 for (i = 0; i < max_corrected; i++) 362 for (i = 0; i < max_corrected; i++)
362 msg("Page reads with %3d corrected bit errors: %d\n", 363 pr_info("Page reads with %3d corrected bit errors: %d\n",
363 i, bitstats[i]); 364 i, bitstats[i]);
364 365
365exit: 366exit:
@@ -370,36 +371,36 @@ static int __init mtd_nandbiterrs_init(void)
370{ 371{
371 int err = 0; 372 int err = 0;
372 373
373 msg("\n"); 374 printk("\n");
374 msg("==================================================\n"); 375 printk(KERN_INFO "==================================================\n");
375 msg("MTD device: %d\n", dev); 376 pr_info("MTD device: %d\n", dev);
376 377
377 mtd = get_mtd_device(NULL, dev); 378 mtd = get_mtd_device(NULL, dev);
378 if (IS_ERR(mtd)) { 379 if (IS_ERR(mtd)) {
379 err = PTR_ERR(mtd); 380 err = PTR_ERR(mtd);
380 msg("error: cannot get MTD device\n"); 381 pr_err("error: cannot get MTD device\n");
381 goto exit_mtddev; 382 goto exit_mtddev;
382 } 383 }
383 384
384 if (mtd->type != MTD_NANDFLASH) { 385 if (mtd->type != MTD_NANDFLASH) {
385 msg("this test requires NAND flash\n"); 386 pr_info("this test requires NAND flash\n");
386 err = -ENODEV; 387 err = -ENODEV;
387 goto exit_nand; 388 goto exit_nand;
388 } 389 }
389 390
390 msg("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n", 391 pr_info("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n",
391 (unsigned long long)mtd->size, mtd->erasesize, 392 (unsigned long long)mtd->size, mtd->erasesize,
392 mtd->writesize, mtd->oobsize); 393 mtd->writesize, mtd->oobsize);
393 394
394 subsize = mtd->writesize >> mtd->subpage_sft; 395 subsize = mtd->writesize >> mtd->subpage_sft;
395 subcount = mtd->writesize / subsize; 396 subcount = mtd->writesize / subsize;
396 397
397 msg("Device uses %d subpages of %d bytes\n", subcount, subsize); 398 pr_info("Device uses %d subpages of %d bytes\n", subcount, subsize);
398 399
399 offset = page_offset * mtd->writesize; 400 offset = page_offset * mtd->writesize;
400 eraseblock = mtd_div_by_eb(offset, mtd); 401 eraseblock = mtd_div_by_eb(offset, mtd);
401 402
402 msg("Using page=%u, offset=%llu, eraseblock=%u\n", 403 pr_info("Using page=%u, offset=%llu, eraseblock=%u\n",
403 page_offset, offset, eraseblock); 404 page_offset, offset, eraseblock);
404 405
405 wbuffer = kmalloc(mtd->writesize, GFP_KERNEL); 406 wbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
@@ -432,8 +433,8 @@ static int __init mtd_nandbiterrs_init(void)
432 goto exit_error; 433 goto exit_error;
433 434
434 err = -EIO; 435 err = -EIO;
435 msg("finished successfully.\n"); 436 pr_info("finished successfully.\n");
436 msg("==================================================\n"); 437 printk(KERN_INFO "==================================================\n");
437 438
438exit_error: 439exit_error:
439 kfree(rbuffer); 440 kfree(rbuffer);
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index b437fa425077..1eee264509a8 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
1#include <linux/kernel.h> 3#include <linux/kernel.h>
2#include <linux/module.h> 4#include <linux/module.h>
3#include <linux/list.h> 5#include <linux/list.h>
@@ -264,13 +266,13 @@ static int nand_ecc_test_run(const size_t size)
264 correct_data, size); 266 correct_data, size);
265 267
266 if (err) { 268 if (err) {
267 pr_err("mtd_nandecctest: not ok - %s-%zd\n", 269 pr_err("not ok - %s-%zd\n",
268 nand_ecc_test[i].name, size); 270 nand_ecc_test[i].name, size);
269 dump_data_ecc(error_data, error_ecc, 271 dump_data_ecc(error_data, error_ecc,
270 correct_data, correct_ecc, size); 272 correct_data, correct_ecc, size);
271 break; 273 break;
272 } 274 }
273 pr_info("mtd_nandecctest: ok - %s-%zd\n", 275 pr_info("ok - %s-%zd\n",
274 nand_ecc_test[i].name, size); 276 nand_ecc_test[i].name, size);
275 } 277 }
276error: 278error:
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index ed9b62827f1b..e827fa8cd844 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -19,6 +19,8 @@
19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> 19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
22#include <asm/div64.h> 24#include <asm/div64.h>
23#include <linux/init.h> 25#include <linux/init.h>
24#include <linux/module.h> 26#include <linux/module.h>
@@ -28,8 +30,6 @@
28#include <linux/slab.h> 30#include <linux/slab.h>
29#include <linux/sched.h> 31#include <linux/sched.h>
30 32
31#define PRINT_PREF KERN_INFO "mtd_oobtest: "
32
33static int dev = -EINVAL; 33static int dev = -EINVAL;
34module_param(dev, int, S_IRUGO); 34module_param(dev, int, S_IRUGO);
35MODULE_PARM_DESC(dev, "MTD device number to use"); 35MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -80,13 +80,12 @@ static int erase_eraseblock(int ebnum)
80 80
81 err = mtd_erase(mtd, &ei); 81 err = mtd_erase(mtd, &ei);
82 if (err) { 82 if (err) {
83 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 83 pr_err("error %d while erasing EB %d\n", err, ebnum);
84 return err; 84 return err;
85 } 85 }
86 86
87 if (ei.state == MTD_ERASE_FAILED) { 87 if (ei.state == MTD_ERASE_FAILED) {
88 printk(PRINT_PREF "some erase error occurred at EB %d\n", 88 pr_err("some erase error occurred at EB %d\n", ebnum);
89 ebnum);
90 return -EIO; 89 return -EIO;
91 } 90 }
92 91
@@ -98,7 +97,7 @@ static int erase_whole_device(void)
98 int err; 97 int err;
99 unsigned int i; 98 unsigned int i;
100 99
101 printk(PRINT_PREF "erasing whole device\n"); 100 pr_info("erasing whole device\n");
102 for (i = 0; i < ebcnt; ++i) { 101 for (i = 0; i < ebcnt; ++i) {
103 if (bbt[i]) 102 if (bbt[i])
104 continue; 103 continue;
@@ -107,7 +106,7 @@ static int erase_whole_device(void)
107 return err; 106 return err;
108 cond_resched(); 107 cond_resched();
109 } 108 }
110 printk(PRINT_PREF "erased %u eraseblocks\n", i); 109 pr_info("erased %u eraseblocks\n", i);
111 return 0; 110 return 0;
112} 111}
113 112
@@ -141,9 +140,9 @@ static int write_eraseblock(int ebnum)
141 ops.oobbuf = writebuf; 140 ops.oobbuf = writebuf;
142 err = mtd_write_oob(mtd, addr, &ops); 141 err = mtd_write_oob(mtd, addr, &ops);
143 if (err || ops.oobretlen != use_len) { 142 if (err || ops.oobretlen != use_len) {
144 printk(PRINT_PREF "error: writeoob failed at %#llx\n", 143 pr_err("error: writeoob failed at %#llx\n",
145 (long long)addr); 144 (long long)addr);
146 printk(PRINT_PREF "error: use_len %d, use_offset %d\n", 145 pr_err("error: use_len %d, use_offset %d\n",
147 use_len, use_offset); 146 use_len, use_offset);
148 errcnt += 1; 147 errcnt += 1;
149 return err ? err : -1; 148 return err ? err : -1;
@@ -160,7 +159,7 @@ static int write_whole_device(void)
160 int err; 159 int err;
161 unsigned int i; 160 unsigned int i;
162 161
163 printk(PRINT_PREF "writing OOBs of whole device\n"); 162 pr_info("writing OOBs of whole device\n");
164 for (i = 0; i < ebcnt; ++i) { 163 for (i = 0; i < ebcnt; ++i) {
165 if (bbt[i]) 164 if (bbt[i])
166 continue; 165 continue;
@@ -168,10 +167,10 @@ static int write_whole_device(void)
168 if (err) 167 if (err)
169 return err; 168 return err;
170 if (i % 256 == 0) 169 if (i % 256 == 0)
171 printk(PRINT_PREF "written up to eraseblock %u\n", i); 170 pr_info("written up to eraseblock %u\n", i);
172 cond_resched(); 171 cond_resched();
173 } 172 }
174 printk(PRINT_PREF "written %u eraseblocks\n", i); 173 pr_info("written %u eraseblocks\n", i);
175 return 0; 174 return 0;
176} 175}
177 176
@@ -194,17 +193,17 @@ static int verify_eraseblock(int ebnum)
194 ops.oobbuf = readbuf; 193 ops.oobbuf = readbuf;
195 err = mtd_read_oob(mtd, addr, &ops); 194 err = mtd_read_oob(mtd, addr, &ops);
196 if (err || ops.oobretlen != use_len) { 195 if (err || ops.oobretlen != use_len) {
197 printk(PRINT_PREF "error: readoob failed at %#llx\n", 196 pr_err("error: readoob failed at %#llx\n",
198 (long long)addr); 197 (long long)addr);
199 errcnt += 1; 198 errcnt += 1;
200 return err ? err : -1; 199 return err ? err : -1;
201 } 200 }
202 if (memcmp(readbuf, writebuf, use_len)) { 201 if (memcmp(readbuf, writebuf, use_len)) {
203 printk(PRINT_PREF "error: verify failed at %#llx\n", 202 pr_err("error: verify failed at %#llx\n",
204 (long long)addr); 203 (long long)addr);
205 errcnt += 1; 204 errcnt += 1;
206 if (errcnt > 1000) { 205 if (errcnt > 1000) {
207 printk(PRINT_PREF "error: too many errors\n"); 206 pr_err("error: too many errors\n");
208 return -1; 207 return -1;
209 } 208 }
210 } 209 }
@@ -221,29 +220,28 @@ static int verify_eraseblock(int ebnum)
221 ops.oobbuf = readbuf; 220 ops.oobbuf = readbuf;
222 err = mtd_read_oob(mtd, addr, &ops); 221 err = mtd_read_oob(mtd, addr, &ops);
223 if (err || ops.oobretlen != mtd->ecclayout->oobavail) { 222 if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
224 printk(PRINT_PREF "error: readoob failed at " 223 pr_err("error: readoob failed at %#llx\n",
225 "%#llx\n", (long long)addr); 224 (long long)addr);
226 errcnt += 1; 225 errcnt += 1;
227 return err ? err : -1; 226 return err ? err : -1;
228 } 227 }
229 if (memcmp(readbuf + use_offset, writebuf, use_len)) { 228 if (memcmp(readbuf + use_offset, writebuf, use_len)) {
230 printk(PRINT_PREF "error: verify failed at " 229 pr_err("error: verify failed at %#llx\n",
231 "%#llx\n", (long long)addr); 230 (long long)addr);
232 errcnt += 1; 231 errcnt += 1;
233 if (errcnt > 1000) { 232 if (errcnt > 1000) {
234 printk(PRINT_PREF "error: too many " 233 pr_err("error: too many errors\n");
235 "errors\n");
236 return -1; 234 return -1;
237 } 235 }
238 } 236 }
239 for (k = 0; k < use_offset; ++k) 237 for (k = 0; k < use_offset; ++k)
240 if (readbuf[k] != 0xff) { 238 if (readbuf[k] != 0xff) {
241 printk(PRINT_PREF "error: verify 0xff " 239 pr_err("error: verify 0xff "
242 "failed at %#llx\n", 240 "failed at %#llx\n",
243 (long long)addr); 241 (long long)addr);
244 errcnt += 1; 242 errcnt += 1;
245 if (errcnt > 1000) { 243 if (errcnt > 1000) {
246 printk(PRINT_PREF "error: too " 244 pr_err("error: too "
247 "many errors\n"); 245 "many errors\n");
248 return -1; 246 return -1;
249 } 247 }
@@ -251,12 +249,12 @@ static int verify_eraseblock(int ebnum)
251 for (k = use_offset + use_len; 249 for (k = use_offset + use_len;
252 k < mtd->ecclayout->oobavail; ++k) 250 k < mtd->ecclayout->oobavail; ++k)
253 if (readbuf[k] != 0xff) { 251 if (readbuf[k] != 0xff) {
254 printk(PRINT_PREF "error: verify 0xff " 252 pr_err("error: verify 0xff "
255 "failed at %#llx\n", 253 "failed at %#llx\n",
256 (long long)addr); 254 (long long)addr);
257 errcnt += 1; 255 errcnt += 1;
258 if (errcnt > 1000) { 256 if (errcnt > 1000) {
259 printk(PRINT_PREF "error: too " 257 pr_err("error: too "
260 "many errors\n"); 258 "many errors\n");
261 return -1; 259 return -1;
262 } 260 }
@@ -286,17 +284,17 @@ static int verify_eraseblock_in_one_go(int ebnum)
286 ops.oobbuf = readbuf; 284 ops.oobbuf = readbuf;
287 err = mtd_read_oob(mtd, addr, &ops); 285 err = mtd_read_oob(mtd, addr, &ops);
288 if (err || ops.oobretlen != len) { 286 if (err || ops.oobretlen != len) {
289 printk(PRINT_PREF "error: readoob failed at %#llx\n", 287 pr_err("error: readoob failed at %#llx\n",
290 (long long)addr); 288 (long long)addr);
291 errcnt += 1; 289 errcnt += 1;
292 return err ? err : -1; 290 return err ? err : -1;
293 } 291 }
294 if (memcmp(readbuf, writebuf, len)) { 292 if (memcmp(readbuf, writebuf, len)) {
295 printk(PRINT_PREF "error: verify failed at %#llx\n", 293 pr_err("error: verify failed at %#llx\n",
296 (long long)addr); 294 (long long)addr);
297 errcnt += 1; 295 errcnt += 1;
298 if (errcnt > 1000) { 296 if (errcnt > 1000) {
299 printk(PRINT_PREF "error: too many errors\n"); 297 pr_err("error: too many errors\n");
300 return -1; 298 return -1;
301 } 299 }
302 } 300 }
@@ -309,7 +307,7 @@ static int verify_all_eraseblocks(void)
309 int err; 307 int err;
310 unsigned int i; 308 unsigned int i;
311 309
312 printk(PRINT_PREF "verifying all eraseblocks\n"); 310 pr_info("verifying all eraseblocks\n");
313 for (i = 0; i < ebcnt; ++i) { 311 for (i = 0; i < ebcnt; ++i) {
314 if (bbt[i]) 312 if (bbt[i])
315 continue; 313 continue;
@@ -317,10 +315,10 @@ static int verify_all_eraseblocks(void)
317 if (err) 315 if (err)
318 return err; 316 return err;
319 if (i % 256 == 0) 317 if (i % 256 == 0)
320 printk(PRINT_PREF "verified up to eraseblock %u\n", i); 318 pr_info("verified up to eraseblock %u\n", i);
321 cond_resched(); 319 cond_resched();
322 } 320 }
323 printk(PRINT_PREF "verified %u eraseblocks\n", i); 321 pr_info("verified %u eraseblocks\n", i);
324 return 0; 322 return 0;
325} 323}
326 324
@@ -331,7 +329,7 @@ static int is_block_bad(int ebnum)
331 329
332 ret = mtd_block_isbad(mtd, addr); 330 ret = mtd_block_isbad(mtd, addr);
333 if (ret) 331 if (ret)
334 printk(PRINT_PREF "block %d is bad\n", ebnum); 332 pr_info("block %d is bad\n", ebnum);
335 return ret; 333 return ret;
336} 334}
337 335
@@ -341,18 +339,18 @@ static int scan_for_bad_eraseblocks(void)
341 339
342 bbt = kmalloc(ebcnt, GFP_KERNEL); 340 bbt = kmalloc(ebcnt, GFP_KERNEL);
343 if (!bbt) { 341 if (!bbt) {
344 printk(PRINT_PREF "error: cannot allocate memory\n"); 342 pr_err("error: cannot allocate memory\n");
345 return -ENOMEM; 343 return -ENOMEM;
346 } 344 }
347 345
348 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 346 pr_info("scanning for bad eraseblocks\n");
349 for (i = 0; i < ebcnt; ++i) { 347 for (i = 0; i < ebcnt; ++i) {
350 bbt[i] = is_block_bad(i) ? 1 : 0; 348 bbt[i] = is_block_bad(i) ? 1 : 0;
351 if (bbt[i]) 349 if (bbt[i])
352 bad += 1; 350 bad += 1;
353 cond_resched(); 351 cond_resched();
354 } 352 }
355 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); 353 pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
356 return 0; 354 return 0;
357} 355}
358 356
@@ -368,22 +366,22 @@ static int __init mtd_oobtest_init(void)
368 printk(KERN_INFO "=================================================\n"); 366 printk(KERN_INFO "=================================================\n");
369 367
370 if (dev < 0) { 368 if (dev < 0) {
371 printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 369 pr_info("Please specify a valid mtd-device via module parameter\n");
372 printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 370 pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
373 return -EINVAL; 371 return -EINVAL;
374 } 372 }
375 373
376 printk(PRINT_PREF "MTD device: %d\n", dev); 374 pr_info("MTD device: %d\n", dev);
377 375
378 mtd = get_mtd_device(NULL, dev); 376 mtd = get_mtd_device(NULL, dev);
379 if (IS_ERR(mtd)) { 377 if (IS_ERR(mtd)) {
380 err = PTR_ERR(mtd); 378 err = PTR_ERR(mtd);
381 printk(PRINT_PREF "error: cannot get MTD device\n"); 379 pr_err("error: cannot get MTD device\n");
382 return err; 380 return err;
383 } 381 }
384 382
385 if (mtd->type != MTD_NANDFLASH) { 383 if (mtd->type != MTD_NANDFLASH) {
386 printk(PRINT_PREF "this test requires NAND flash\n"); 384 pr_info("this test requires NAND flash\n");
387 goto out; 385 goto out;
388 } 386 }
389 387
@@ -392,7 +390,7 @@ static int __init mtd_oobtest_init(void)
392 ebcnt = tmp; 390 ebcnt = tmp;
393 pgcnt = mtd->erasesize / mtd->writesize; 391 pgcnt = mtd->erasesize / mtd->writesize;
394 392
395 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 393 pr_info("MTD device size %llu, eraseblock size %u, "
396 "page size %u, count of eraseblocks %u, pages per " 394 "page size %u, count of eraseblocks %u, pages per "
397 "eraseblock %u, OOB size %u\n", 395 "eraseblock %u, OOB size %u\n",
398 (unsigned long long)mtd->size, mtd->erasesize, 396 (unsigned long long)mtd->size, mtd->erasesize,
@@ -401,12 +399,12 @@ static int __init mtd_oobtest_init(void)
401 err = -ENOMEM; 399 err = -ENOMEM;
402 readbuf = kmalloc(mtd->erasesize, GFP_KERNEL); 400 readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
403 if (!readbuf) { 401 if (!readbuf) {
404 printk(PRINT_PREF "error: cannot allocate memory\n"); 402 pr_err("error: cannot allocate memory\n");
405 goto out; 403 goto out;
406 } 404 }
407 writebuf = kmalloc(mtd->erasesize, GFP_KERNEL); 405 writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
408 if (!writebuf) { 406 if (!writebuf) {
409 printk(PRINT_PREF "error: cannot allocate memory\n"); 407 pr_err("error: cannot allocate memory\n");
410 goto out; 408 goto out;
411 } 409 }
412 410
@@ -420,7 +418,7 @@ static int __init mtd_oobtest_init(void)
420 vary_offset = 0; 418 vary_offset = 0;
421 419
422 /* First test: write all OOB, read it back and verify */ 420 /* First test: write all OOB, read it back and verify */
423 printk(PRINT_PREF "test 1 of 5\n"); 421 pr_info("test 1 of 5\n");
424 422
425 err = erase_whole_device(); 423 err = erase_whole_device();
426 if (err) 424 if (err)
@@ -440,7 +438,7 @@ static int __init mtd_oobtest_init(void)
440 * Second test: write all OOB, a block at a time, read it back and 438 * Second test: write all OOB, a block at a time, read it back and
441 * verify. 439 * verify.
442 */ 440 */
443 printk(PRINT_PREF "test 2 of 5\n"); 441 pr_info("test 2 of 5\n");
444 442
445 err = erase_whole_device(); 443 err = erase_whole_device();
446 if (err) 444 if (err)
@@ -453,7 +451,7 @@ static int __init mtd_oobtest_init(void)
453 451
454 /* Check all eraseblocks */ 452 /* Check all eraseblocks */
455 simple_srand(3); 453 simple_srand(3);
456 printk(PRINT_PREF "verifying all eraseblocks\n"); 454 pr_info("verifying all eraseblocks\n");
457 for (i = 0; i < ebcnt; ++i) { 455 for (i = 0; i < ebcnt; ++i) {
458 if (bbt[i]) 456 if (bbt[i])
459 continue; 457 continue;
@@ -461,16 +459,16 @@ static int __init mtd_oobtest_init(void)
461 if (err) 459 if (err)
462 goto out; 460 goto out;
463 if (i % 256 == 0) 461 if (i % 256 == 0)
464 printk(PRINT_PREF "verified up to eraseblock %u\n", i); 462 pr_info("verified up to eraseblock %u\n", i);
465 cond_resched(); 463 cond_resched();
466 } 464 }
467 printk(PRINT_PREF "verified %u eraseblocks\n", i); 465 pr_info("verified %u eraseblocks\n", i);
468 466
469 /* 467 /*
470 * Third test: write OOB at varying offsets and lengths, read it back 468 * Third test: write OOB at varying offsets and lengths, read it back
471 * and verify. 469 * and verify.
472 */ 470 */
473 printk(PRINT_PREF "test 3 of 5\n"); 471 pr_info("test 3 of 5\n");
474 472
475 err = erase_whole_device(); 473 err = erase_whole_device();
476 if (err) 474 if (err)
@@ -503,7 +501,7 @@ static int __init mtd_oobtest_init(void)
503 vary_offset = 0; 501 vary_offset = 0;
504 502
505 /* Fourth test: try to write off end of device */ 503 /* Fourth test: try to write off end of device */
506 printk(PRINT_PREF "test 4 of 5\n"); 504 pr_info("test 4 of 5\n");
507 505
508 err = erase_whole_device(); 506 err = erase_whole_device();
509 if (err) 507 if (err)
@@ -522,14 +520,14 @@ static int __init mtd_oobtest_init(void)
522 ops.ooboffs = mtd->ecclayout->oobavail; 520 ops.ooboffs = mtd->ecclayout->oobavail;
523 ops.datbuf = NULL; 521 ops.datbuf = NULL;
524 ops.oobbuf = writebuf; 522 ops.oobbuf = writebuf;
525 printk(PRINT_PREF "attempting to start write past end of OOB\n"); 523 pr_info("attempting to start write past end of OOB\n");
526 printk(PRINT_PREF "an error is expected...\n"); 524 pr_info("an error is expected...\n");
527 err = mtd_write_oob(mtd, addr0, &ops); 525 err = mtd_write_oob(mtd, addr0, &ops);
528 if (err) { 526 if (err) {
529 printk(PRINT_PREF "error occurred as expected\n"); 527 pr_info("error occurred as expected\n");
530 err = 0; 528 err = 0;
531 } else { 529 } else {
532 printk(PRINT_PREF "error: can write past end of OOB\n"); 530 pr_err("error: can write past end of OOB\n");
533 errcnt += 1; 531 errcnt += 1;
534 } 532 }
535 533
@@ -542,19 +540,19 @@ static int __init mtd_oobtest_init(void)
542 ops.ooboffs = mtd->ecclayout->oobavail; 540 ops.ooboffs = mtd->ecclayout->oobavail;
543 ops.datbuf = NULL; 541 ops.datbuf = NULL;
544 ops.oobbuf = readbuf; 542 ops.oobbuf = readbuf;
545 printk(PRINT_PREF "attempting to start read past end of OOB\n"); 543 pr_info("attempting to start read past end of OOB\n");
546 printk(PRINT_PREF "an error is expected...\n"); 544 pr_info("an error is expected...\n");
547 err = mtd_read_oob(mtd, addr0, &ops); 545 err = mtd_read_oob(mtd, addr0, &ops);
548 if (err) { 546 if (err) {
549 printk(PRINT_PREF "error occurred as expected\n"); 547 pr_info("error occurred as expected\n");
550 err = 0; 548 err = 0;
551 } else { 549 } else {
552 printk(PRINT_PREF "error: can read past end of OOB\n"); 550 pr_err("error: can read past end of OOB\n");
553 errcnt += 1; 551 errcnt += 1;
554 } 552 }
555 553
556 if (bbt[ebcnt - 1]) 554 if (bbt[ebcnt - 1])
557 printk(PRINT_PREF "skipping end of device tests because last " 555 pr_info("skipping end of device tests because last "
558 "block is bad\n"); 556 "block is bad\n");
559 else { 557 else {
560 /* Attempt to write off end of device */ 558 /* Attempt to write off end of device */
@@ -566,14 +564,14 @@ static int __init mtd_oobtest_init(void)
566 ops.ooboffs = 0; 564 ops.ooboffs = 0;
567 ops.datbuf = NULL; 565 ops.datbuf = NULL;
568 ops.oobbuf = writebuf; 566 ops.oobbuf = writebuf;
569 printk(PRINT_PREF "attempting to write past end of device\n"); 567 pr_info("attempting to write past end of device\n");
570 printk(PRINT_PREF "an error is expected...\n"); 568 pr_info("an error is expected...\n");
571 err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops); 569 err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
572 if (err) { 570 if (err) {
573 printk(PRINT_PREF "error occurred as expected\n"); 571 pr_info("error occurred as expected\n");
574 err = 0; 572 err = 0;
575 } else { 573 } else {
576 printk(PRINT_PREF "error: wrote past end of device\n"); 574 pr_err("error: wrote past end of device\n");
577 errcnt += 1; 575 errcnt += 1;
578 } 576 }
579 577
@@ -586,14 +584,14 @@ static int __init mtd_oobtest_init(void)
586 ops.ooboffs = 0; 584 ops.ooboffs = 0;
587 ops.datbuf = NULL; 585 ops.datbuf = NULL;
588 ops.oobbuf = readbuf; 586 ops.oobbuf = readbuf;
589 printk(PRINT_PREF "attempting to read past end of device\n"); 587 pr_info("attempting to read past end of device\n");
590 printk(PRINT_PREF "an error is expected...\n"); 588 pr_info("an error is expected...\n");
591 err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); 589 err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
592 if (err) { 590 if (err) {
593 printk(PRINT_PREF "error occurred as expected\n"); 591 pr_info("error occurred as expected\n");
594 err = 0; 592 err = 0;
595 } else { 593 } else {
596 printk(PRINT_PREF "error: read past end of device\n"); 594 pr_err("error: read past end of device\n");
597 errcnt += 1; 595 errcnt += 1;
598 } 596 }
599 597
@@ -610,14 +608,14 @@ static int __init mtd_oobtest_init(void)
610 ops.ooboffs = 1; 608 ops.ooboffs = 1;
611 ops.datbuf = NULL; 609 ops.datbuf = NULL;
612 ops.oobbuf = writebuf; 610 ops.oobbuf = writebuf;
613 printk(PRINT_PREF "attempting to write past end of device\n"); 611 pr_info("attempting to write past end of device\n");
614 printk(PRINT_PREF "an error is expected...\n"); 612 pr_info("an error is expected...\n");
615 err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops); 613 err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
616 if (err) { 614 if (err) {
617 printk(PRINT_PREF "error occurred as expected\n"); 615 pr_info("error occurred as expected\n");
618 err = 0; 616 err = 0;
619 } else { 617 } else {
620 printk(PRINT_PREF "error: wrote past end of device\n"); 618 pr_err("error: wrote past end of device\n");
621 errcnt += 1; 619 errcnt += 1;
622 } 620 }
623 621
@@ -630,20 +628,20 @@ static int __init mtd_oobtest_init(void)
630 ops.ooboffs = 1; 628 ops.ooboffs = 1;
631 ops.datbuf = NULL; 629 ops.datbuf = NULL;
632 ops.oobbuf = readbuf; 630 ops.oobbuf = readbuf;
633 printk(PRINT_PREF "attempting to read past end of device\n"); 631 pr_info("attempting to read past end of device\n");
634 printk(PRINT_PREF "an error is expected...\n"); 632 pr_info("an error is expected...\n");
635 err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); 633 err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
636 if (err) { 634 if (err) {
637 printk(PRINT_PREF "error occurred as expected\n"); 635 pr_info("error occurred as expected\n");
638 err = 0; 636 err = 0;
639 } else { 637 } else {
640 printk(PRINT_PREF "error: read past end of device\n"); 638 pr_err("error: read past end of device\n");
641 errcnt += 1; 639 errcnt += 1;
642 } 640 }
643 } 641 }
644 642
645 /* Fifth test: write / read across block boundaries */ 643 /* Fifth test: write / read across block boundaries */
646 printk(PRINT_PREF "test 5 of 5\n"); 644 pr_info("test 5 of 5\n");
647 645
648 /* Erase all eraseblocks */ 646 /* Erase all eraseblocks */
649 err = erase_whole_device(); 647 err = erase_whole_device();
@@ -652,7 +650,7 @@ static int __init mtd_oobtest_init(void)
652 650
653 /* Write all eraseblocks */ 651 /* Write all eraseblocks */
654 simple_srand(11); 652 simple_srand(11);
655 printk(PRINT_PREF "writing OOBs of whole device\n"); 653 pr_info("writing OOBs of whole device\n");
656 for (i = 0; i < ebcnt - 1; ++i) { 654 for (i = 0; i < ebcnt - 1; ++i) {
657 int cnt = 2; 655 int cnt = 2;
658 int pg; 656 int pg;
@@ -674,17 +672,16 @@ static int __init mtd_oobtest_init(void)
674 if (err) 672 if (err)
675 goto out; 673 goto out;
676 if (i % 256 == 0) 674 if (i % 256 == 0)
677 printk(PRINT_PREF "written up to eraseblock " 675 pr_info("written up to eraseblock %u\n", i);
678 "%u\n", i);
679 cond_resched(); 676 cond_resched();
680 addr += mtd->writesize; 677 addr += mtd->writesize;
681 } 678 }
682 } 679 }
683 printk(PRINT_PREF "written %u eraseblocks\n", i); 680 pr_info("written %u eraseblocks\n", i);
684 681
685 /* Check all eraseblocks */ 682 /* Check all eraseblocks */
686 simple_srand(11); 683 simple_srand(11);
687 printk(PRINT_PREF "verifying all eraseblocks\n"); 684 pr_info("verifying all eraseblocks\n");
688 for (i = 0; i < ebcnt - 1; ++i) { 685 for (i = 0; i < ebcnt - 1; ++i) {
689 if (bbt[i] || bbt[i + 1]) 686 if (bbt[i] || bbt[i + 1])
690 continue; 687 continue;
@@ -702,28 +699,28 @@ static int __init mtd_oobtest_init(void)
702 if (err) 699 if (err)
703 goto out; 700 goto out;
704 if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) { 701 if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
705 printk(PRINT_PREF "error: verify failed at %#llx\n", 702 pr_err("error: verify failed at %#llx\n",
706 (long long)addr); 703 (long long)addr);
707 errcnt += 1; 704 errcnt += 1;
708 if (errcnt > 1000) { 705 if (errcnt > 1000) {
709 printk(PRINT_PREF "error: too many errors\n"); 706 pr_err("error: too many errors\n");
710 goto out; 707 goto out;
711 } 708 }
712 } 709 }
713 if (i % 256 == 0) 710 if (i % 256 == 0)
714 printk(PRINT_PREF "verified up to eraseblock %u\n", i); 711 pr_info("verified up to eraseblock %u\n", i);
715 cond_resched(); 712 cond_resched();
716 } 713 }
717 printk(PRINT_PREF "verified %u eraseblocks\n", i); 714 pr_info("verified %u eraseblocks\n", i);
718 715
719 printk(PRINT_PREF "finished with %d errors\n", errcnt); 716 pr_info("finished with %d errors\n", errcnt);
720out: 717out:
721 kfree(bbt); 718 kfree(bbt);
722 kfree(writebuf); 719 kfree(writebuf);
723 kfree(readbuf); 720 kfree(readbuf);
724 put_mtd_device(mtd); 721 put_mtd_device(mtd);
725 if (err) 722 if (err)
 726 printk(PRINT_PREF "error %d occurred\n", err); 723 pr_err("error %d occurred\n", err);
727 printk(KERN_INFO "=================================================\n"); 724 printk(KERN_INFO "=================================================\n");
728 return err; 725 return err;
729} 726}
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 252ddb092fb2..f93a76f88113 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -19,6 +19,8 @@
19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> 19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
22#include <asm/div64.h> 24#include <asm/div64.h>
23#include <linux/init.h> 25#include <linux/init.h>
24#include <linux/module.h> 26#include <linux/module.h>
@@ -28,8 +30,6 @@
28#include <linux/slab.h> 30#include <linux/slab.h>
29#include <linux/sched.h> 31#include <linux/sched.h>
30 32
31#define PRINT_PREF KERN_INFO "mtd_pagetest: "
32
33static int dev = -EINVAL; 33static int dev = -EINVAL;
34module_param(dev, int, S_IRUGO); 34module_param(dev, int, S_IRUGO);
35MODULE_PARM_DESC(dev, "MTD device number to use"); 35MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -79,12 +79,12 @@ static int erase_eraseblock(int ebnum)
79 79
80 err = mtd_erase(mtd, &ei); 80 err = mtd_erase(mtd, &ei);
81 if (err) { 81 if (err) {
82 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 82 pr_err("error %d while erasing EB %d\n", err, ebnum);
83 return err; 83 return err;
84 } 84 }
85 85
86 if (ei.state == MTD_ERASE_FAILED) { 86 if (ei.state == MTD_ERASE_FAILED) {
87 printk(PRINT_PREF "some erase error occurred at EB %d\n", 87 pr_err("some erase error occurred at EB %d\n",
88 ebnum); 88 ebnum);
89 return -EIO; 89 return -EIO;
90 } 90 }
@@ -102,7 +102,7 @@ static int write_eraseblock(int ebnum)
102 cond_resched(); 102 cond_resched();
103 err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf); 103 err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf);
104 if (err || written != mtd->erasesize) 104 if (err || written != mtd->erasesize)
105 printk(PRINT_PREF "error: write failed at %#llx\n", 105 pr_err("error: write failed at %#llx\n",
106 (long long)addr); 106 (long long)addr);
107 107
108 return err; 108 return err;
@@ -131,7 +131,7 @@ static int verify_eraseblock(int ebnum)
131 if (mtd_is_bitflip(err)) 131 if (mtd_is_bitflip(err))
132 err = 0; 132 err = 0;
133 if (err || read != bufsize) { 133 if (err || read != bufsize) {
134 printk(PRINT_PREF "error: read failed at %#llx\n", 134 pr_err("error: read failed at %#llx\n",
135 (long long)addr0); 135 (long long)addr0);
136 return err; 136 return err;
137 } 137 }
@@ -139,7 +139,7 @@ static int verify_eraseblock(int ebnum)
139 if (mtd_is_bitflip(err)) 139 if (mtd_is_bitflip(err))
140 err = 0; 140 err = 0;
141 if (err || read != bufsize) { 141 if (err || read != bufsize) {
142 printk(PRINT_PREF "error: read failed at %#llx\n", 142 pr_err("error: read failed at %#llx\n",
143 (long long)(addrn - bufsize)); 143 (long long)(addrn - bufsize));
144 return err; 144 return err;
145 } 145 }
@@ -148,12 +148,12 @@ static int verify_eraseblock(int ebnum)
148 if (mtd_is_bitflip(err)) 148 if (mtd_is_bitflip(err))
149 err = 0; 149 err = 0;
150 if (err || read != bufsize) { 150 if (err || read != bufsize) {
151 printk(PRINT_PREF "error: read failed at %#llx\n", 151 pr_err("error: read failed at %#llx\n",
152 (long long)addr); 152 (long long)addr);
153 break; 153 break;
154 } 154 }
155 if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) { 155 if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {
156 printk(PRINT_PREF "error: verify failed at %#llx\n", 156 pr_err("error: verify failed at %#llx\n",
157 (long long)addr); 157 (long long)addr);
158 errcnt += 1; 158 errcnt += 1;
159 } 159 }
@@ -166,7 +166,7 @@ static int verify_eraseblock(int ebnum)
166 if (mtd_is_bitflip(err)) 166 if (mtd_is_bitflip(err))
167 err = 0; 167 err = 0;
168 if (err || read != bufsize) { 168 if (err || read != bufsize) {
169 printk(PRINT_PREF "error: read failed at %#llx\n", 169 pr_err("error: read failed at %#llx\n",
170 (long long)addr0); 170 (long long)addr0);
171 return err; 171 return err;
172 } 172 }
@@ -174,7 +174,7 @@ static int verify_eraseblock(int ebnum)
174 if (mtd_is_bitflip(err)) 174 if (mtd_is_bitflip(err))
175 err = 0; 175 err = 0;
176 if (err || read != bufsize) { 176 if (err || read != bufsize) {
177 printk(PRINT_PREF "error: read failed at %#llx\n", 177 pr_err("error: read failed at %#llx\n",
178 (long long)(addrn - bufsize)); 178 (long long)(addrn - bufsize));
179 return err; 179 return err;
180 } 180 }
@@ -183,14 +183,14 @@ static int verify_eraseblock(int ebnum)
183 if (mtd_is_bitflip(err)) 183 if (mtd_is_bitflip(err))
184 err = 0; 184 err = 0;
185 if (err || read != bufsize) { 185 if (err || read != bufsize) {
186 printk(PRINT_PREF "error: read failed at %#llx\n", 186 pr_err("error: read failed at %#llx\n",
187 (long long)addr); 187 (long long)addr);
188 return err; 188 return err;
189 } 189 }
190 memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize); 190 memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
191 set_random_data(boundary + pgsize, pgsize); 191 set_random_data(boundary + pgsize, pgsize);
192 if (memcmp(twopages, boundary, bufsize)) { 192 if (memcmp(twopages, boundary, bufsize)) {
193 printk(PRINT_PREF "error: verify failed at %#llx\n", 193 pr_err("error: verify failed at %#llx\n",
194 (long long)addr); 194 (long long)addr);
195 errcnt += 1; 195 errcnt += 1;
196 } 196 }
@@ -206,10 +206,10 @@ static int crosstest(void)
206 loff_t addr, addr0, addrn; 206 loff_t addr, addr0, addrn;
207 unsigned char *pp1, *pp2, *pp3, *pp4; 207 unsigned char *pp1, *pp2, *pp3, *pp4;
208 208
209 printk(PRINT_PREF "crosstest\n"); 209 pr_info("crosstest\n");
210 pp1 = kmalloc(pgsize * 4, GFP_KERNEL); 210 pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
211 if (!pp1) { 211 if (!pp1) {
212 printk(PRINT_PREF "error: cannot allocate memory\n"); 212 pr_err("error: cannot allocate memory\n");
213 return -ENOMEM; 213 return -ENOMEM;
214 } 214 }
215 pp2 = pp1 + pgsize; 215 pp2 = pp1 + pgsize;
@@ -231,7 +231,7 @@ static int crosstest(void)
231 if (mtd_is_bitflip(err)) 231 if (mtd_is_bitflip(err))
232 err = 0; 232 err = 0;
233 if (err || read != pgsize) { 233 if (err || read != pgsize) {
234 printk(PRINT_PREF "error: read failed at %#llx\n", 234 pr_err("error: read failed at %#llx\n",
235 (long long)addr); 235 (long long)addr);
236 kfree(pp1); 236 kfree(pp1);
237 return err; 237 return err;
@@ -243,7 +243,7 @@ static int crosstest(void)
243 if (mtd_is_bitflip(err)) 243 if (mtd_is_bitflip(err))
244 err = 0; 244 err = 0;
245 if (err || read != pgsize) { 245 if (err || read != pgsize) {
246 printk(PRINT_PREF "error: read failed at %#llx\n", 246 pr_err("error: read failed at %#llx\n",
247 (long long)addr); 247 (long long)addr);
248 kfree(pp1); 248 kfree(pp1);
249 return err; 249 return err;
@@ -251,12 +251,12 @@ static int crosstest(void)
251 251
252 /* Read first page to pp2 */ 252 /* Read first page to pp2 */
253 addr = addr0; 253 addr = addr0;
254 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 254 pr_info("reading page at %#llx\n", (long long)addr);
255 err = mtd_read(mtd, addr, pgsize, &read, pp2); 255 err = mtd_read(mtd, addr, pgsize, &read, pp2);
256 if (mtd_is_bitflip(err)) 256 if (mtd_is_bitflip(err))
257 err = 0; 257 err = 0;
258 if (err || read != pgsize) { 258 if (err || read != pgsize) {
259 printk(PRINT_PREF "error: read failed at %#llx\n", 259 pr_err("error: read failed at %#llx\n",
260 (long long)addr); 260 (long long)addr);
261 kfree(pp1); 261 kfree(pp1);
262 return err; 262 return err;
@@ -264,12 +264,12 @@ static int crosstest(void)
264 264
265 /* Read last page to pp3 */ 265 /* Read last page to pp3 */
266 addr = addrn - pgsize; 266 addr = addrn - pgsize;
267 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 267 pr_info("reading page at %#llx\n", (long long)addr);
268 err = mtd_read(mtd, addr, pgsize, &read, pp3); 268 err = mtd_read(mtd, addr, pgsize, &read, pp3);
269 if (mtd_is_bitflip(err)) 269 if (mtd_is_bitflip(err))
270 err = 0; 270 err = 0;
271 if (err || read != pgsize) { 271 if (err || read != pgsize) {
272 printk(PRINT_PREF "error: read failed at %#llx\n", 272 pr_err("error: read failed at %#llx\n",
273 (long long)addr); 273 (long long)addr);
274 kfree(pp1); 274 kfree(pp1);
275 return err; 275 return err;
@@ -277,25 +277,25 @@ static int crosstest(void)
277 277
278 /* Read first page again to pp4 */ 278 /* Read first page again to pp4 */
279 addr = addr0; 279 addr = addr0;
280 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); 280 pr_info("reading page at %#llx\n", (long long)addr);
281 err = mtd_read(mtd, addr, pgsize, &read, pp4); 281 err = mtd_read(mtd, addr, pgsize, &read, pp4);
282 if (mtd_is_bitflip(err)) 282 if (mtd_is_bitflip(err))
283 err = 0; 283 err = 0;
284 if (err || read != pgsize) { 284 if (err || read != pgsize) {
285 printk(PRINT_PREF "error: read failed at %#llx\n", 285 pr_err("error: read failed at %#llx\n",
286 (long long)addr); 286 (long long)addr);
287 kfree(pp1); 287 kfree(pp1);
288 return err; 288 return err;
289 } 289 }
290 290
291 /* pp2 and pp4 should be the same */ 291 /* pp2 and pp4 should be the same */
292 printk(PRINT_PREF "verifying pages read at %#llx match\n", 292 pr_info("verifying pages read at %#llx match\n",
293 (long long)addr0); 293 (long long)addr0);
294 if (memcmp(pp2, pp4, pgsize)) { 294 if (memcmp(pp2, pp4, pgsize)) {
295 printk(PRINT_PREF "verify failed!\n"); 295 pr_err("verify failed!\n");
296 errcnt += 1; 296 errcnt += 1;
297 } else if (!err) 297 } else if (!err)
298 printk(PRINT_PREF "crosstest ok\n"); 298 pr_info("crosstest ok\n");
299 kfree(pp1); 299 kfree(pp1);
300 return err; 300 return err;
301} 301}
@@ -307,7 +307,7 @@ static int erasecrosstest(void)
307 loff_t addr0; 307 loff_t addr0;
308 char *readbuf = twopages; 308 char *readbuf = twopages;
309 309
310 printk(PRINT_PREF "erasecrosstest\n"); 310 pr_info("erasecrosstest\n");
311 311
312 ebnum = 0; 312 ebnum = 0;
313 addr0 = 0; 313 addr0 = 0;
@@ -320,79 +320,79 @@ static int erasecrosstest(void)
320 while (ebnum2 && bbt[ebnum2]) 320 while (ebnum2 && bbt[ebnum2])
321 ebnum2 -= 1; 321 ebnum2 -= 1;
322 322
323 printk(PRINT_PREF "erasing block %d\n", ebnum); 323 pr_info("erasing block %d\n", ebnum);
324 err = erase_eraseblock(ebnum); 324 err = erase_eraseblock(ebnum);
325 if (err) 325 if (err)
326 return err; 326 return err;
327 327
328 printk(PRINT_PREF "writing 1st page of block %d\n", ebnum); 328 pr_info("writing 1st page of block %d\n", ebnum);
329 set_random_data(writebuf, pgsize); 329 set_random_data(writebuf, pgsize);
330 strcpy(writebuf, "There is no data like this!"); 330 strcpy(writebuf, "There is no data like this!");
331 err = mtd_write(mtd, addr0, pgsize, &written, writebuf); 331 err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
332 if (err || written != pgsize) { 332 if (err || written != pgsize) {
333 printk(PRINT_PREF "error: write failed at %#llx\n", 333 pr_info("error: write failed at %#llx\n",
334 (long long)addr0); 334 (long long)addr0);
335 return err ? err : -1; 335 return err ? err : -1;
336 } 336 }
337 337
338 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 338 pr_info("reading 1st page of block %d\n", ebnum);
339 memset(readbuf, 0, pgsize); 339 memset(readbuf, 0, pgsize);
340 err = mtd_read(mtd, addr0, pgsize, &read, readbuf); 340 err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
341 if (mtd_is_bitflip(err)) 341 if (mtd_is_bitflip(err))
342 err = 0; 342 err = 0;
343 if (err || read != pgsize) { 343 if (err || read != pgsize) {
344 printk(PRINT_PREF "error: read failed at %#llx\n", 344 pr_err("error: read failed at %#llx\n",
345 (long long)addr0); 345 (long long)addr0);
346 return err ? err : -1; 346 return err ? err : -1;
347 } 347 }
348 348
349 printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum); 349 pr_info("verifying 1st page of block %d\n", ebnum);
350 if (memcmp(writebuf, readbuf, pgsize)) { 350 if (memcmp(writebuf, readbuf, pgsize)) {
351 printk(PRINT_PREF "verify failed!\n"); 351 pr_err("verify failed!\n");
352 errcnt += 1; 352 errcnt += 1;
353 return -1; 353 return -1;
354 } 354 }
355 355
356 printk(PRINT_PREF "erasing block %d\n", ebnum); 356 pr_info("erasing block %d\n", ebnum);
357 err = erase_eraseblock(ebnum); 357 err = erase_eraseblock(ebnum);
358 if (err) 358 if (err)
359 return err; 359 return err;
360 360
361 printk(PRINT_PREF "writing 1st page of block %d\n", ebnum); 361 pr_info("writing 1st page of block %d\n", ebnum);
362 set_random_data(writebuf, pgsize); 362 set_random_data(writebuf, pgsize);
363 strcpy(writebuf, "There is no data like this!"); 363 strcpy(writebuf, "There is no data like this!");
364 err = mtd_write(mtd, addr0, pgsize, &written, writebuf); 364 err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
365 if (err || written != pgsize) { 365 if (err || written != pgsize) {
366 printk(PRINT_PREF "error: write failed at %#llx\n", 366 pr_err("error: write failed at %#llx\n",
367 (long long)addr0); 367 (long long)addr0);
368 return err ? err : -1; 368 return err ? err : -1;
369 } 369 }
370 370
371 printk(PRINT_PREF "erasing block %d\n", ebnum2); 371 pr_info("erasing block %d\n", ebnum2);
372 err = erase_eraseblock(ebnum2); 372 err = erase_eraseblock(ebnum2);
373 if (err) 373 if (err)
374 return err; 374 return err;
375 375
376 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 376 pr_info("reading 1st page of block %d\n", ebnum);
377 memset(readbuf, 0, pgsize); 377 memset(readbuf, 0, pgsize);
378 err = mtd_read(mtd, addr0, pgsize, &read, readbuf); 378 err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
379 if (mtd_is_bitflip(err)) 379 if (mtd_is_bitflip(err))
380 err = 0; 380 err = 0;
381 if (err || read != pgsize) { 381 if (err || read != pgsize) {
382 printk(PRINT_PREF "error: read failed at %#llx\n", 382 pr_err("error: read failed at %#llx\n",
383 (long long)addr0); 383 (long long)addr0);
384 return err ? err : -1; 384 return err ? err : -1;
385 } 385 }
386 386
387 printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum); 387 pr_info("verifying 1st page of block %d\n", ebnum);
388 if (memcmp(writebuf, readbuf, pgsize)) { 388 if (memcmp(writebuf, readbuf, pgsize)) {
389 printk(PRINT_PREF "verify failed!\n"); 389 pr_err("verify failed!\n");
390 errcnt += 1; 390 errcnt += 1;
391 return -1; 391 return -1;
392 } 392 }
393 393
394 if (!err) 394 if (!err)
395 printk(PRINT_PREF "erasecrosstest ok\n"); 395 pr_info("erasecrosstest ok\n");
396 return err; 396 return err;
397} 397}
398 398
@@ -402,7 +402,7 @@ static int erasetest(void)
402 int err = 0, i, ebnum, ok = 1; 402 int err = 0, i, ebnum, ok = 1;
403 loff_t addr0; 403 loff_t addr0;
404 404
405 printk(PRINT_PREF "erasetest\n"); 405 pr_info("erasetest\n");
406 406
407 ebnum = 0; 407 ebnum = 0;
408 addr0 = 0; 408 addr0 = 0;
@@ -411,40 +411,40 @@ static int erasetest(void)
411 ebnum += 1; 411 ebnum += 1;
412 } 412 }
413 413
414 printk(PRINT_PREF "erasing block %d\n", ebnum); 414 pr_info("erasing block %d\n", ebnum);
415 err = erase_eraseblock(ebnum); 415 err = erase_eraseblock(ebnum);
416 if (err) 416 if (err)
417 return err; 417 return err;
418 418
419 printk(PRINT_PREF "writing 1st page of block %d\n", ebnum); 419 pr_info("writing 1st page of block %d\n", ebnum);
420 set_random_data(writebuf, pgsize); 420 set_random_data(writebuf, pgsize);
421 err = mtd_write(mtd, addr0, pgsize, &written, writebuf); 421 err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
422 if (err || written != pgsize) { 422 if (err || written != pgsize) {
423 printk(PRINT_PREF "error: write failed at %#llx\n", 423 pr_err("error: write failed at %#llx\n",
424 (long long)addr0); 424 (long long)addr0);
425 return err ? err : -1; 425 return err ? err : -1;
426 } 426 }
427 427
428 printk(PRINT_PREF "erasing block %d\n", ebnum); 428 pr_info("erasing block %d\n", ebnum);
429 err = erase_eraseblock(ebnum); 429 err = erase_eraseblock(ebnum);
430 if (err) 430 if (err)
431 return err; 431 return err;
432 432
433 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); 433 pr_info("reading 1st page of block %d\n", ebnum);
434 err = mtd_read(mtd, addr0, pgsize, &read, twopages); 434 err = mtd_read(mtd, addr0, pgsize, &read, twopages);
435 if (mtd_is_bitflip(err)) 435 if (mtd_is_bitflip(err))
436 err = 0; 436 err = 0;
437 if (err || read != pgsize) { 437 if (err || read != pgsize) {
438 printk(PRINT_PREF "error: read failed at %#llx\n", 438 pr_err("error: read failed at %#llx\n",
439 (long long)addr0); 439 (long long)addr0);
440 return err ? err : -1; 440 return err ? err : -1;
441 } 441 }
442 442
443 printk(PRINT_PREF "verifying 1st page of block %d is all 0xff\n", 443 pr_info("verifying 1st page of block %d is all 0xff\n",
444 ebnum); 444 ebnum);
445 for (i = 0; i < pgsize; ++i) 445 for (i = 0; i < pgsize; ++i)
446 if (twopages[i] != 0xff) { 446 if (twopages[i] != 0xff) {
447 printk(PRINT_PREF "verifying all 0xff failed at %d\n", 447 pr_err("verifying all 0xff failed at %d\n",
448 i); 448 i);
449 errcnt += 1; 449 errcnt += 1;
450 ok = 0; 450 ok = 0;
@@ -452,7 +452,7 @@ static int erasetest(void)
452 } 452 }
453 453
454 if (ok && !err) 454 if (ok && !err)
455 printk(PRINT_PREF "erasetest ok\n"); 455 pr_info("erasetest ok\n");
456 456
457 return err; 457 return err;
458} 458}
@@ -464,7 +464,7 @@ static int is_block_bad(int ebnum)
464 464
465 ret = mtd_block_isbad(mtd, addr); 465 ret = mtd_block_isbad(mtd, addr);
466 if (ret) 466 if (ret)
467 printk(PRINT_PREF "block %d is bad\n", ebnum); 467 pr_info("block %d is bad\n", ebnum);
468 return ret; 468 return ret;
469} 469}
470 470
@@ -474,18 +474,18 @@ static int scan_for_bad_eraseblocks(void)
474 474
475 bbt = kzalloc(ebcnt, GFP_KERNEL); 475 bbt = kzalloc(ebcnt, GFP_KERNEL);
476 if (!bbt) { 476 if (!bbt) {
477 printk(PRINT_PREF "error: cannot allocate memory\n"); 477 pr_err("error: cannot allocate memory\n");
478 return -ENOMEM; 478 return -ENOMEM;
479 } 479 }
480 480
481 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 481 pr_info("scanning for bad eraseblocks\n");
482 for (i = 0; i < ebcnt; ++i) { 482 for (i = 0; i < ebcnt; ++i) {
483 bbt[i] = is_block_bad(i) ? 1 : 0; 483 bbt[i] = is_block_bad(i) ? 1 : 0;
484 if (bbt[i]) 484 if (bbt[i])
485 bad += 1; 485 bad += 1;
486 cond_resched(); 486 cond_resched();
487 } 487 }
488 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); 488 pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
489 return 0; 489 return 0;
490} 490}
491 491
@@ -499,22 +499,22 @@ static int __init mtd_pagetest_init(void)
499 printk(KERN_INFO "=================================================\n"); 499 printk(KERN_INFO "=================================================\n");
500 500
501 if (dev < 0) { 501 if (dev < 0) {
502 printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 502 pr_info("Please specify a valid mtd-device via module parameter\n");
503 printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 503 pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
504 return -EINVAL; 504 return -EINVAL;
505 } 505 }
506 506
507 printk(PRINT_PREF "MTD device: %d\n", dev); 507 pr_info("MTD device: %d\n", dev);
508 508
509 mtd = get_mtd_device(NULL, dev); 509 mtd = get_mtd_device(NULL, dev);
510 if (IS_ERR(mtd)) { 510 if (IS_ERR(mtd)) {
511 err = PTR_ERR(mtd); 511 err = PTR_ERR(mtd);
512 printk(PRINT_PREF "error: cannot get MTD device\n"); 512 pr_err("error: cannot get MTD device\n");
513 return err; 513 return err;
514 } 514 }
515 515
516 if (mtd->type != MTD_NANDFLASH) { 516 if (mtd->type != MTD_NANDFLASH) {
517 printk(PRINT_PREF "this test requires NAND flash\n"); 517 pr_info("this test requires NAND flash\n");
518 goto out; 518 goto out;
519 } 519 }
520 520
@@ -524,7 +524,7 @@ static int __init mtd_pagetest_init(void)
524 pgcnt = mtd->erasesize / mtd->writesize; 524 pgcnt = mtd->erasesize / mtd->writesize;
525 pgsize = mtd->writesize; 525 pgsize = mtd->writesize;
526 526
527 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 527 pr_info("MTD device size %llu, eraseblock size %u, "
528 "page size %u, count of eraseblocks %u, pages per " 528 "page size %u, count of eraseblocks %u, pages per "
529 "eraseblock %u, OOB size %u\n", 529 "eraseblock %u, OOB size %u\n",
530 (unsigned long long)mtd->size, mtd->erasesize, 530 (unsigned long long)mtd->size, mtd->erasesize,
@@ -534,17 +534,17 @@ static int __init mtd_pagetest_init(void)
534 bufsize = pgsize * 2; 534 bufsize = pgsize * 2;
535 writebuf = kmalloc(mtd->erasesize, GFP_KERNEL); 535 writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
536 if (!writebuf) { 536 if (!writebuf) {
537 printk(PRINT_PREF "error: cannot allocate memory\n"); 537 pr_err("error: cannot allocate memory\n");
538 goto out; 538 goto out;
539 } 539 }
540 twopages = kmalloc(bufsize, GFP_KERNEL); 540 twopages = kmalloc(bufsize, GFP_KERNEL);
541 if (!twopages) { 541 if (!twopages) {
542 printk(PRINT_PREF "error: cannot allocate memory\n"); 542 pr_err("error: cannot allocate memory\n");
543 goto out; 543 goto out;
544 } 544 }
545 boundary = kmalloc(bufsize, GFP_KERNEL); 545 boundary = kmalloc(bufsize, GFP_KERNEL);
546 if (!boundary) { 546 if (!boundary) {
547 printk(PRINT_PREF "error: cannot allocate memory\n"); 547 pr_err("error: cannot allocate memory\n");
548 goto out; 548 goto out;
549 } 549 }
550 550
@@ -553,7 +553,7 @@ static int __init mtd_pagetest_init(void)
553 goto out; 553 goto out;
554 554
555 /* Erase all eraseblocks */ 555 /* Erase all eraseblocks */
556 printk(PRINT_PREF "erasing whole device\n"); 556 pr_info("erasing whole device\n");
557 for (i = 0; i < ebcnt; ++i) { 557 for (i = 0; i < ebcnt; ++i) {
558 if (bbt[i]) 558 if (bbt[i])
559 continue; 559 continue;
@@ -562,11 +562,11 @@ static int __init mtd_pagetest_init(void)
562 goto out; 562 goto out;
563 cond_resched(); 563 cond_resched();
564 } 564 }
565 printk(PRINT_PREF "erased %u eraseblocks\n", i); 565 pr_info("erased %u eraseblocks\n", i);
566 566
567 /* Write all eraseblocks */ 567 /* Write all eraseblocks */
568 simple_srand(1); 568 simple_srand(1);
569 printk(PRINT_PREF "writing whole device\n"); 569 pr_info("writing whole device\n");
570 for (i = 0; i < ebcnt; ++i) { 570 for (i = 0; i < ebcnt; ++i) {
571 if (bbt[i]) 571 if (bbt[i])
572 continue; 572 continue;
@@ -574,14 +574,14 @@ static int __init mtd_pagetest_init(void)
574 if (err) 574 if (err)
575 goto out; 575 goto out;
576 if (i % 256 == 0) 576 if (i % 256 == 0)
577 printk(PRINT_PREF "written up to eraseblock %u\n", i); 577 pr_info("written up to eraseblock %u\n", i);
578 cond_resched(); 578 cond_resched();
579 } 579 }
580 printk(PRINT_PREF "written %u eraseblocks\n", i); 580 pr_info("written %u eraseblocks\n", i);
581 581
582 /* Check all eraseblocks */ 582 /* Check all eraseblocks */
583 simple_srand(1); 583 simple_srand(1);
584 printk(PRINT_PREF "verifying all eraseblocks\n"); 584 pr_info("verifying all eraseblocks\n");
585 for (i = 0; i < ebcnt; ++i) { 585 for (i = 0; i < ebcnt; ++i) {
586 if (bbt[i]) 586 if (bbt[i])
587 continue; 587 continue;
@@ -589,10 +589,10 @@ static int __init mtd_pagetest_init(void)
589 if (err) 589 if (err)
590 goto out; 590 goto out;
591 if (i % 256 == 0) 591 if (i % 256 == 0)
592 printk(PRINT_PREF "verified up to eraseblock %u\n", i); 592 pr_info("verified up to eraseblock %u\n", i);
593 cond_resched(); 593 cond_resched();
594 } 594 }
595 printk(PRINT_PREF "verified %u eraseblocks\n", i); 595 pr_info("verified %u eraseblocks\n", i);
596 596
597 err = crosstest(); 597 err = crosstest();
598 if (err) 598 if (err)
@@ -606,7 +606,7 @@ static int __init mtd_pagetest_init(void)
606 if (err) 606 if (err)
607 goto out; 607 goto out;
608 608
609 printk(PRINT_PREF "finished with %d errors\n", errcnt); 609 pr_info("finished with %d errors\n", errcnt);
610out: 610out:
611 611
612 kfree(bbt); 612 kfree(bbt);
@@ -615,7 +615,7 @@ out:
615 kfree(writebuf); 615 kfree(writebuf);
616 put_mtd_device(mtd); 616 put_mtd_device(mtd);
617 if (err) 617 if (err)
618 printk(PRINT_PREF "error %d occurred\n", err); 618 pr_info("error %d occurred\n", err);
619 printk(KERN_INFO "=================================================\n"); 619 printk(KERN_INFO "=================================================\n");
620 return err; 620 return err;
621} 621}
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index 121aba189cec..266de04b6d29 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -19,6 +19,8 @@
19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> 19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
22#include <linux/init.h> 24#include <linux/init.h>
23#include <linux/module.h> 25#include <linux/module.h>
24#include <linux/moduleparam.h> 26#include <linux/moduleparam.h>
@@ -27,8 +29,6 @@
27#include <linux/slab.h> 29#include <linux/slab.h>
28#include <linux/sched.h> 30#include <linux/sched.h>
29 31
30#define PRINT_PREF KERN_INFO "mtd_readtest: "
31
32static int dev = -EINVAL; 32static int dev = -EINVAL;
33module_param(dev, int, S_IRUGO); 33module_param(dev, int, S_IRUGO);
34MODULE_PARM_DESC(dev, "MTD device number to use"); 34MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -51,12 +51,12 @@ static int read_eraseblock_by_page(int ebnum)
51 void *oobbuf = iobuf1; 51 void *oobbuf = iobuf1;
52 52
53 for (i = 0; i < pgcnt; i++) { 53 for (i = 0; i < pgcnt; i++) {
54 memset(buf, 0 , pgcnt); 54 memset(buf, 0 , pgsize);
55 ret = mtd_read(mtd, addr, pgsize, &read, buf); 55 ret = mtd_read(mtd, addr, pgsize, &read, buf);
56 if (ret == -EUCLEAN) 56 if (ret == -EUCLEAN)
57 ret = 0; 57 ret = 0;
58 if (ret || read != pgsize) { 58 if (ret || read != pgsize) {
59 printk(PRINT_PREF "error: read failed at %#llx\n", 59 pr_err("error: read failed at %#llx\n",
60 (long long)addr); 60 (long long)addr);
61 if (!err) 61 if (!err)
62 err = ret; 62 err = ret;
@@ -77,7 +77,7 @@ static int read_eraseblock_by_page(int ebnum)
77 ret = mtd_read_oob(mtd, addr, &ops); 77 ret = mtd_read_oob(mtd, addr, &ops);
78 if ((ret && !mtd_is_bitflip(ret)) || 78 if ((ret && !mtd_is_bitflip(ret)) ||
79 ops.oobretlen != mtd->oobsize) { 79 ops.oobretlen != mtd->oobsize) {
80 printk(PRINT_PREF "error: read oob failed at " 80 pr_err("error: read oob failed at "
81 "%#llx\n", (long long)addr); 81 "%#llx\n", (long long)addr);
82 if (!err) 82 if (!err)
83 err = ret; 83 err = ret;
@@ -99,7 +99,7 @@ static void dump_eraseblock(int ebnum)
99 char line[128]; 99 char line[128];
100 int pg, oob; 100 int pg, oob;
101 101
102 printk(PRINT_PREF "dumping eraseblock %d\n", ebnum); 102 pr_info("dumping eraseblock %d\n", ebnum);
103 n = mtd->erasesize; 103 n = mtd->erasesize;
104 for (i = 0; i < n;) { 104 for (i = 0; i < n;) {
105 char *p = line; 105 char *p = line;
@@ -112,7 +112,7 @@ static void dump_eraseblock(int ebnum)
112 } 112 }
113 if (!mtd->oobsize) 113 if (!mtd->oobsize)
114 return; 114 return;
115 printk(PRINT_PREF "dumping oob from eraseblock %d\n", ebnum); 115 pr_info("dumping oob from eraseblock %d\n", ebnum);
116 n = mtd->oobsize; 116 n = mtd->oobsize;
117 for (pg = 0, i = 0; pg < pgcnt; pg++) 117 for (pg = 0, i = 0; pg < pgcnt; pg++)
118 for (oob = 0; oob < n;) { 118 for (oob = 0; oob < n;) {
@@ -134,7 +134,7 @@ static int is_block_bad(int ebnum)
134 134
135 ret = mtd_block_isbad(mtd, addr); 135 ret = mtd_block_isbad(mtd, addr);
136 if (ret) 136 if (ret)
137 printk(PRINT_PREF "block %d is bad\n", ebnum); 137 pr_info("block %d is bad\n", ebnum);
138 return ret; 138 return ret;
139} 139}
140 140
@@ -144,21 +144,21 @@ static int scan_for_bad_eraseblocks(void)
144 144
145 bbt = kzalloc(ebcnt, GFP_KERNEL); 145 bbt = kzalloc(ebcnt, GFP_KERNEL);
146 if (!bbt) { 146 if (!bbt) {
147 printk(PRINT_PREF "error: cannot allocate memory\n"); 147 pr_err("error: cannot allocate memory\n");
148 return -ENOMEM; 148 return -ENOMEM;
149 } 149 }
150 150
151 if (!mtd_can_have_bb(mtd)) 151 if (!mtd_can_have_bb(mtd))
152 return 0; 152 return 0;
153 153
154 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 154 pr_info("scanning for bad eraseblocks\n");
155 for (i = 0; i < ebcnt; ++i) { 155 for (i = 0; i < ebcnt; ++i) {
156 bbt[i] = is_block_bad(i) ? 1 : 0; 156 bbt[i] = is_block_bad(i) ? 1 : 0;
157 if (bbt[i]) 157 if (bbt[i])
158 bad += 1; 158 bad += 1;
159 cond_resched(); 159 cond_resched();
160 } 160 }
161 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); 161 pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
162 return 0; 162 return 0;
163} 163}
164 164
@@ -171,21 +171,21 @@ static int __init mtd_readtest_init(void)
171 printk(KERN_INFO "=================================================\n"); 171 printk(KERN_INFO "=================================================\n");
172 172
173 if (dev < 0) { 173 if (dev < 0) {
174 printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 174 pr_info("Please specify a valid mtd-device via module parameter\n");
175 return -EINVAL; 175 return -EINVAL;
176 } 176 }
177 177
178 printk(PRINT_PREF "MTD device: %d\n", dev); 178 pr_info("MTD device: %d\n", dev);
179 179
180 mtd = get_mtd_device(NULL, dev); 180 mtd = get_mtd_device(NULL, dev);
181 if (IS_ERR(mtd)) { 181 if (IS_ERR(mtd)) {
182 err = PTR_ERR(mtd); 182 err = PTR_ERR(mtd);
183 printk(PRINT_PREF "error: Cannot get MTD device\n"); 183 pr_err("error: Cannot get MTD device\n");
184 return err; 184 return err;
185 } 185 }
186 186
187 if (mtd->writesize == 1) { 187 if (mtd->writesize == 1) {
188 printk(PRINT_PREF "not NAND flash, assume page size is 512 " 188 pr_info("not NAND flash, assume page size is 512 "
189 "bytes.\n"); 189 "bytes.\n");
190 pgsize = 512; 190 pgsize = 512;
191 } else 191 } else
@@ -196,7 +196,7 @@ static int __init mtd_readtest_init(void)
196 ebcnt = tmp; 196 ebcnt = tmp;
197 pgcnt = mtd->erasesize / pgsize; 197 pgcnt = mtd->erasesize / pgsize;
198 198
199 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 199 pr_info("MTD device size %llu, eraseblock size %u, "
200 "page size %u, count of eraseblocks %u, pages per " 200 "page size %u, count of eraseblocks %u, pages per "
201 "eraseblock %u, OOB size %u\n", 201 "eraseblock %u, OOB size %u\n",
202 (unsigned long long)mtd->size, mtd->erasesize, 202 (unsigned long long)mtd->size, mtd->erasesize,
@@ -205,12 +205,12 @@ static int __init mtd_readtest_init(void)
205 err = -ENOMEM; 205 err = -ENOMEM;
206 iobuf = kmalloc(mtd->erasesize, GFP_KERNEL); 206 iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
207 if (!iobuf) { 207 if (!iobuf) {
208 printk(PRINT_PREF "error: cannot allocate memory\n"); 208 pr_err("error: cannot allocate memory\n");
209 goto out; 209 goto out;
210 } 210 }
211 iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL); 211 iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL);
212 if (!iobuf1) { 212 if (!iobuf1) {
213 printk(PRINT_PREF "error: cannot allocate memory\n"); 213 pr_err("error: cannot allocate memory\n");
214 goto out; 214 goto out;
215 } 215 }
216 216
@@ -219,7 +219,7 @@ static int __init mtd_readtest_init(void)
219 goto out; 219 goto out;
220 220
221 /* Read all eraseblocks 1 page at a time */ 221 /* Read all eraseblocks 1 page at a time */
222 printk(PRINT_PREF "testing page read\n"); 222 pr_info("testing page read\n");
223 for (i = 0; i < ebcnt; ++i) { 223 for (i = 0; i < ebcnt; ++i) {
224 int ret; 224 int ret;
225 225
@@ -235,9 +235,9 @@ static int __init mtd_readtest_init(void)
235 } 235 }
236 236
237 if (err) 237 if (err)
238 printk(PRINT_PREF "finished with errors\n"); 238 pr_info("finished with errors\n");
239 else 239 else
240 printk(PRINT_PREF "finished\n"); 240 pr_info("finished\n");
241 241
242out: 242out:
243 243
@@ -246,7 +246,7 @@ out:
246 kfree(bbt); 246 kfree(bbt);
247 put_mtd_device(mtd); 247 put_mtd_device(mtd);
248 if (err) 248 if (err)
249 printk(PRINT_PREF "error %d occurred\n", err); 249 pr_info("error %d occurred\n", err);
250 printk(KERN_INFO "=================================================\n"); 250 printk(KERN_INFO "=================================================\n");
251 return err; 251 return err;
252} 252}
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 42b0f7456fc4..596cbea8df4c 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -19,6 +19,8 @@
19 * Author: Adrian Hunter <adrian.hunter@nokia.com> 19 * Author: Adrian Hunter <adrian.hunter@nokia.com>
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
22#include <linux/init.h> 24#include <linux/init.h>
23#include <linux/module.h> 25#include <linux/module.h>
24#include <linux/moduleparam.h> 26#include <linux/moduleparam.h>
@@ -28,8 +30,6 @@
28#include <linux/sched.h> 30#include <linux/sched.h>
29#include <linux/random.h> 31#include <linux/random.h>
30 32
31#define PRINT_PREF KERN_INFO "mtd_speedtest: "
32
33static int dev = -EINVAL; 33static int dev = -EINVAL;
34module_param(dev, int, S_IRUGO); 34module_param(dev, int, S_IRUGO);
35MODULE_PARM_DESC(dev, "MTD device number to use"); 35MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -70,12 +70,12 @@ static int erase_eraseblock(int ebnum)
70 70
71 err = mtd_erase(mtd, &ei); 71 err = mtd_erase(mtd, &ei);
72 if (err) { 72 if (err) {
73 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 73 pr_err("error %d while erasing EB %d\n", err, ebnum);
74 return err; 74 return err;
75 } 75 }
76 76
77 if (ei.state == MTD_ERASE_FAILED) { 77 if (ei.state == MTD_ERASE_FAILED) {
78 printk(PRINT_PREF "some erase error occurred at EB %d\n", 78 pr_err("some erase error occurred at EB %d\n",
79 ebnum); 79 ebnum);
80 return -EIO; 80 return -EIO;
81 } 81 }
@@ -96,13 +96,13 @@ static int multiblock_erase(int ebnum, int blocks)
96 96
97 err = mtd_erase(mtd, &ei); 97 err = mtd_erase(mtd, &ei);
98 if (err) { 98 if (err) {
99 printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n", 99 pr_err("error %d while erasing EB %d, blocks %d\n",
100 err, ebnum, blocks); 100 err, ebnum, blocks);
101 return err; 101 return err;
102 } 102 }
103 103
104 if (ei.state == MTD_ERASE_FAILED) { 104 if (ei.state == MTD_ERASE_FAILED) {
105 printk(PRINT_PREF "some erase error occurred at EB %d," 105 pr_err("some erase error occurred at EB %d,"
106 "blocks %d\n", ebnum, blocks); 106 "blocks %d\n", ebnum, blocks);
107 return -EIO; 107 return -EIO;
108 } 108 }
@@ -134,7 +134,7 @@ static int write_eraseblock(int ebnum)
134 134
135 err = mtd_write(mtd, addr, mtd->erasesize, &written, iobuf); 135 err = mtd_write(mtd, addr, mtd->erasesize, &written, iobuf);
136 if (err || written != mtd->erasesize) { 136 if (err || written != mtd->erasesize) {
137 printk(PRINT_PREF "error: write failed at %#llx\n", addr); 137 pr_err("error: write failed at %#llx\n", addr);
138 if (!err) 138 if (!err)
139 err = -EINVAL; 139 err = -EINVAL;
140 } 140 }
@@ -152,7 +152,7 @@ static int write_eraseblock_by_page(int ebnum)
152 for (i = 0; i < pgcnt; i++) { 152 for (i = 0; i < pgcnt; i++) {
153 err = mtd_write(mtd, addr, pgsize, &written, buf); 153 err = mtd_write(mtd, addr, pgsize, &written, buf);
154 if (err || written != pgsize) { 154 if (err || written != pgsize) {
155 printk(PRINT_PREF "error: write failed at %#llx\n", 155 pr_err("error: write failed at %#llx\n",
156 addr); 156 addr);
157 if (!err) 157 if (!err)
158 err = -EINVAL; 158 err = -EINVAL;
@@ -175,7 +175,7 @@ static int write_eraseblock_by_2pages(int ebnum)
175 for (i = 0; i < n; i++) { 175 for (i = 0; i < n; i++) {
176 err = mtd_write(mtd, addr, sz, &written, buf); 176 err = mtd_write(mtd, addr, sz, &written, buf);
177 if (err || written != sz) { 177 if (err || written != sz) {
178 printk(PRINT_PREF "error: write failed at %#llx\n", 178 pr_err("error: write failed at %#llx\n",
179 addr); 179 addr);
180 if (!err) 180 if (!err)
181 err = -EINVAL; 181 err = -EINVAL;
@@ -187,7 +187,7 @@ static int write_eraseblock_by_2pages(int ebnum)
187 if (pgcnt % 2) { 187 if (pgcnt % 2) {
188 err = mtd_write(mtd, addr, pgsize, &written, buf); 188 err = mtd_write(mtd, addr, pgsize, &written, buf);
189 if (err || written != pgsize) { 189 if (err || written != pgsize) {
190 printk(PRINT_PREF "error: write failed at %#llx\n", 190 pr_err("error: write failed at %#llx\n",
191 addr); 191 addr);
192 if (!err) 192 if (!err)
193 err = -EINVAL; 193 err = -EINVAL;
@@ -208,7 +208,7 @@ static int read_eraseblock(int ebnum)
208 if (mtd_is_bitflip(err)) 208 if (mtd_is_bitflip(err))
209 err = 0; 209 err = 0;
210 if (err || read != mtd->erasesize) { 210 if (err || read != mtd->erasesize) {
211 printk(PRINT_PREF "error: read failed at %#llx\n", addr); 211 pr_err("error: read failed at %#llx\n", addr);
212 if (!err) 212 if (!err)
213 err = -EINVAL; 213 err = -EINVAL;
214 } 214 }
@@ -229,7 +229,7 @@ static int read_eraseblock_by_page(int ebnum)
229 if (mtd_is_bitflip(err)) 229 if (mtd_is_bitflip(err))
230 err = 0; 230 err = 0;
231 if (err || read != pgsize) { 231 if (err || read != pgsize) {
232 printk(PRINT_PREF "error: read failed at %#llx\n", 232 pr_err("error: read failed at %#llx\n",
233 addr); 233 addr);
234 if (!err) 234 if (!err)
235 err = -EINVAL; 235 err = -EINVAL;
@@ -255,7 +255,7 @@ static int read_eraseblock_by_2pages(int ebnum)
255 if (mtd_is_bitflip(err)) 255 if (mtd_is_bitflip(err))
256 err = 0; 256 err = 0;
257 if (err || read != sz) { 257 if (err || read != sz) {
258 printk(PRINT_PREF "error: read failed at %#llx\n", 258 pr_err("error: read failed at %#llx\n",
259 addr); 259 addr);
260 if (!err) 260 if (!err)
261 err = -EINVAL; 261 err = -EINVAL;
@@ -270,7 +270,7 @@ static int read_eraseblock_by_2pages(int ebnum)
270 if (mtd_is_bitflip(err)) 270 if (mtd_is_bitflip(err))
271 err = 0; 271 err = 0;
272 if (err || read != pgsize) { 272 if (err || read != pgsize) {
273 printk(PRINT_PREF "error: read failed at %#llx\n", 273 pr_err("error: read failed at %#llx\n",
274 addr); 274 addr);
275 if (!err) 275 if (!err)
276 err = -EINVAL; 276 err = -EINVAL;
@@ -287,7 +287,7 @@ static int is_block_bad(int ebnum)
287 287
288 ret = mtd_block_isbad(mtd, addr); 288 ret = mtd_block_isbad(mtd, addr);
289 if (ret) 289 if (ret)
290 printk(PRINT_PREF "block %d is bad\n", ebnum); 290 pr_info("block %d is bad\n", ebnum);
291 return ret; 291 return ret;
292} 292}
293 293
@@ -321,21 +321,21 @@ static int scan_for_bad_eraseblocks(void)
321 321
322 bbt = kzalloc(ebcnt, GFP_KERNEL); 322 bbt = kzalloc(ebcnt, GFP_KERNEL);
323 if (!bbt) { 323 if (!bbt) {
324 printk(PRINT_PREF "error: cannot allocate memory\n"); 324 pr_err("error: cannot allocate memory\n");
325 return -ENOMEM; 325 return -ENOMEM;
326 } 326 }
327 327
328 if (!mtd_can_have_bb(mtd)) 328 if (!mtd_can_have_bb(mtd))
329 goto out; 329 goto out;
330 330
331 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 331 pr_info("scanning for bad eraseblocks\n");
332 for (i = 0; i < ebcnt; ++i) { 332 for (i = 0; i < ebcnt; ++i) {
333 bbt[i] = is_block_bad(i) ? 1 : 0; 333 bbt[i] = is_block_bad(i) ? 1 : 0;
334 if (bbt[i]) 334 if (bbt[i])
335 bad += 1; 335 bad += 1;
336 cond_resched(); 336 cond_resched();
337 } 337 }
338 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); 338 pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
339out: 339out:
340 goodebcnt = ebcnt - bad; 340 goodebcnt = ebcnt - bad;
341 return 0; 341 return 0;
@@ -351,25 +351,25 @@ static int __init mtd_speedtest_init(void)
351 printk(KERN_INFO "=================================================\n"); 351 printk(KERN_INFO "=================================================\n");
352 352
353 if (dev < 0) { 353 if (dev < 0) {
354 printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 354 pr_info("Please specify a valid mtd-device via module parameter\n");
355 printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 355 pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
356 return -EINVAL; 356 return -EINVAL;
357 } 357 }
358 358
359 if (count) 359 if (count)
360 printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count); 360 pr_info("MTD device: %d count: %d\n", dev, count);
361 else 361 else
362 printk(PRINT_PREF "MTD device: %d\n", dev); 362 pr_info("MTD device: %d\n", dev);
363 363
364 mtd = get_mtd_device(NULL, dev); 364 mtd = get_mtd_device(NULL, dev);
365 if (IS_ERR(mtd)) { 365 if (IS_ERR(mtd)) {
366 err = PTR_ERR(mtd); 366 err = PTR_ERR(mtd);
367 printk(PRINT_PREF "error: cannot get MTD device\n"); 367 pr_err("error: cannot get MTD device\n");
368 return err; 368 return err;
369 } 369 }
370 370
371 if (mtd->writesize == 1) { 371 if (mtd->writesize == 1) {
372 printk(PRINT_PREF "not NAND flash, assume page size is 512 " 372 pr_info("not NAND flash, assume page size is 512 "
373 "bytes.\n"); 373 "bytes.\n");
374 pgsize = 512; 374 pgsize = 512;
375 } else 375 } else
@@ -380,7 +380,7 @@ static int __init mtd_speedtest_init(void)
380 ebcnt = tmp; 380 ebcnt = tmp;
381 pgcnt = mtd->erasesize / pgsize; 381 pgcnt = mtd->erasesize / pgsize;
382 382
383 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 383 pr_info("MTD device size %llu, eraseblock size %u, "
384 "page size %u, count of eraseblocks %u, pages per " 384 "page size %u, count of eraseblocks %u, pages per "
385 "eraseblock %u, OOB size %u\n", 385 "eraseblock %u, OOB size %u\n",
386 (unsigned long long)mtd->size, mtd->erasesize, 386 (unsigned long long)mtd->size, mtd->erasesize,
@@ -392,7 +392,7 @@ static int __init mtd_speedtest_init(void)
392 err = -ENOMEM; 392 err = -ENOMEM;
393 iobuf = kmalloc(mtd->erasesize, GFP_KERNEL); 393 iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
394 if (!iobuf) { 394 if (!iobuf) {
395 printk(PRINT_PREF "error: cannot allocate memory\n"); 395 pr_err("error: cannot allocate memory\n");
396 goto out; 396 goto out;
397 } 397 }
398 398
@@ -407,7 +407,7 @@ static int __init mtd_speedtest_init(void)
407 goto out; 407 goto out;
408 408
409 /* Write all eraseblocks, 1 eraseblock at a time */ 409 /* Write all eraseblocks, 1 eraseblock at a time */
410 printk(PRINT_PREF "testing eraseblock write speed\n"); 410 pr_info("testing eraseblock write speed\n");
411 start_timing(); 411 start_timing();
412 for (i = 0; i < ebcnt; ++i) { 412 for (i = 0; i < ebcnt; ++i) {
413 if (bbt[i]) 413 if (bbt[i])
@@ -419,10 +419,10 @@ static int __init mtd_speedtest_init(void)
419 } 419 }
420 stop_timing(); 420 stop_timing();
421 speed = calc_speed(); 421 speed = calc_speed();
422 printk(PRINT_PREF "eraseblock write speed is %ld KiB/s\n", speed); 422 pr_info("eraseblock write speed is %ld KiB/s\n", speed);
423 423
424 /* Read all eraseblocks, 1 eraseblock at a time */ 424 /* Read all eraseblocks, 1 eraseblock at a time */
425 printk(PRINT_PREF "testing eraseblock read speed\n"); 425 pr_info("testing eraseblock read speed\n");
426 start_timing(); 426 start_timing();
427 for (i = 0; i < ebcnt; ++i) { 427 for (i = 0; i < ebcnt; ++i) {
428 if (bbt[i]) 428 if (bbt[i])
@@ -434,14 +434,14 @@ static int __init mtd_speedtest_init(void)
434 } 434 }
435 stop_timing(); 435 stop_timing();
436 speed = calc_speed(); 436 speed = calc_speed();
437 printk(PRINT_PREF "eraseblock read speed is %ld KiB/s\n", speed); 437 pr_info("eraseblock read speed is %ld KiB/s\n", speed);
438 438
439 err = erase_whole_device(); 439 err = erase_whole_device();
440 if (err) 440 if (err)
441 goto out; 441 goto out;
442 442
443 /* Write all eraseblocks, 1 page at a time */ 443 /* Write all eraseblocks, 1 page at a time */
444 printk(PRINT_PREF "testing page write speed\n"); 444 pr_info("testing page write speed\n");
445 start_timing(); 445 start_timing();
446 for (i = 0; i < ebcnt; ++i) { 446 for (i = 0; i < ebcnt; ++i) {
447 if (bbt[i]) 447 if (bbt[i])
@@ -453,10 +453,10 @@ static int __init mtd_speedtest_init(void)
453 } 453 }
454 stop_timing(); 454 stop_timing();
455 speed = calc_speed(); 455 speed = calc_speed();
456 printk(PRINT_PREF "page write speed is %ld KiB/s\n", speed); 456 pr_info("page write speed is %ld KiB/s\n", speed);
457 457
458 /* Read all eraseblocks, 1 page at a time */ 458 /* Read all eraseblocks, 1 page at a time */
459 printk(PRINT_PREF "testing page read speed\n"); 459 pr_info("testing page read speed\n");
460 start_timing(); 460 start_timing();
461 for (i = 0; i < ebcnt; ++i) { 461 for (i = 0; i < ebcnt; ++i) {
462 if (bbt[i]) 462 if (bbt[i])
@@ -468,14 +468,14 @@ static int __init mtd_speedtest_init(void)
468 } 468 }
469 stop_timing(); 469 stop_timing();
470 speed = calc_speed(); 470 speed = calc_speed();
471 printk(PRINT_PREF "page read speed is %ld KiB/s\n", speed); 471 pr_info("page read speed is %ld KiB/s\n", speed);
472 472
473 err = erase_whole_device(); 473 err = erase_whole_device();
474 if (err) 474 if (err)
475 goto out; 475 goto out;
476 476
477 /* Write all eraseblocks, 2 pages at a time */ 477 /* Write all eraseblocks, 2 pages at a time */
478 printk(PRINT_PREF "testing 2 page write speed\n"); 478 pr_info("testing 2 page write speed\n");
479 start_timing(); 479 start_timing();
480 for (i = 0; i < ebcnt; ++i) { 480 for (i = 0; i < ebcnt; ++i) {
481 if (bbt[i]) 481 if (bbt[i])
@@ -487,10 +487,10 @@ static int __init mtd_speedtest_init(void)
487 } 487 }
488 stop_timing(); 488 stop_timing();
489 speed = calc_speed(); 489 speed = calc_speed();
490 printk(PRINT_PREF "2 page write speed is %ld KiB/s\n", speed); 490 pr_info("2 page write speed is %ld KiB/s\n", speed);
491 491
492 /* Read all eraseblocks, 2 pages at a time */ 492 /* Read all eraseblocks, 2 pages at a time */
493 printk(PRINT_PREF "testing 2 page read speed\n"); 493 pr_info("testing 2 page read speed\n");
494 start_timing(); 494 start_timing();
495 for (i = 0; i < ebcnt; ++i) { 495 for (i = 0; i < ebcnt; ++i) {
496 if (bbt[i]) 496 if (bbt[i])
@@ -502,10 +502,10 @@ static int __init mtd_speedtest_init(void)
502 } 502 }
503 stop_timing(); 503 stop_timing();
504 speed = calc_speed(); 504 speed = calc_speed();
505 printk(PRINT_PREF "2 page read speed is %ld KiB/s\n", speed); 505 pr_info("2 page read speed is %ld KiB/s\n", speed);
506 506
507 /* Erase all eraseblocks */ 507 /* Erase all eraseblocks */
508 printk(PRINT_PREF "Testing erase speed\n"); 508 pr_info("Testing erase speed\n");
509 start_timing(); 509 start_timing();
510 for (i = 0; i < ebcnt; ++i) { 510 for (i = 0; i < ebcnt; ++i) {
511 if (bbt[i]) 511 if (bbt[i])
@@ -517,12 +517,12 @@ static int __init mtd_speedtest_init(void)
517 } 517 }
518 stop_timing(); 518 stop_timing();
519 speed = calc_speed(); 519 speed = calc_speed();
520 printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed); 520 pr_info("erase speed is %ld KiB/s\n", speed);
521 521
522 /* Multi-block erase all eraseblocks */ 522 /* Multi-block erase all eraseblocks */
523 for (k = 1; k < 7; k++) { 523 for (k = 1; k < 7; k++) {
524 blocks = 1 << k; 524 blocks = 1 << k;
525 printk(PRINT_PREF "Testing %dx multi-block erase speed\n", 525 pr_info("Testing %dx multi-block erase speed\n",
526 blocks); 526 blocks);
527 start_timing(); 527 start_timing();
528 for (i = 0; i < ebcnt; ) { 528 for (i = 0; i < ebcnt; ) {
@@ -541,16 +541,16 @@ static int __init mtd_speedtest_init(void)
541 } 541 }
542 stop_timing(); 542 stop_timing();
543 speed = calc_speed(); 543 speed = calc_speed();
544 printk(PRINT_PREF "%dx multi-block erase speed is %ld KiB/s\n", 544 pr_info("%dx multi-block erase speed is %ld KiB/s\n",
545 blocks, speed); 545 blocks, speed);
546 } 546 }
547 printk(PRINT_PREF "finished\n"); 547 pr_info("finished\n");
548out: 548out:
549 kfree(iobuf); 549 kfree(iobuf);
550 kfree(bbt); 550 kfree(bbt);
551 put_mtd_device(mtd); 551 put_mtd_device(mtd);
552 if (err) 552 if (err)
553 printk(PRINT_PREF "error %d occurred\n", err); 553 pr_info("error %d occurred\n", err);
554 printk(KERN_INFO "=================================================\n"); 554 printk(KERN_INFO "=================================================\n");
555 return err; 555 return err;
556} 556}
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index cb268cebf01a..3729f679ae5d 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -19,6 +19,8 @@
19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> 19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
22#include <linux/init.h> 24#include <linux/init.h>
23#include <linux/module.h> 25#include <linux/module.h>
24#include <linux/moduleparam.h> 26#include <linux/moduleparam.h>
@@ -29,8 +31,6 @@
29#include <linux/vmalloc.h> 31#include <linux/vmalloc.h>
30#include <linux/random.h> 32#include <linux/random.h>
31 33
32#define PRINT_PREF KERN_INFO "mtd_stresstest: "
33
34static int dev = -EINVAL; 34static int dev = -EINVAL;
35module_param(dev, int, S_IRUGO); 35module_param(dev, int, S_IRUGO);
36MODULE_PARM_DESC(dev, "MTD device number to use"); 36MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -94,12 +94,12 @@ static int erase_eraseblock(int ebnum)
94 94
95 err = mtd_erase(mtd, &ei); 95 err = mtd_erase(mtd, &ei);
96 if (unlikely(err)) { 96 if (unlikely(err)) {
97 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 97 pr_err("error %d while erasing EB %d\n", err, ebnum);
98 return err; 98 return err;
99 } 99 }
100 100
101 if (unlikely(ei.state == MTD_ERASE_FAILED)) { 101 if (unlikely(ei.state == MTD_ERASE_FAILED)) {
102 printk(PRINT_PREF "some erase error occurred at EB %d\n", 102 pr_err("some erase error occurred at EB %d\n",
103 ebnum); 103 ebnum);
104 return -EIO; 104 return -EIO;
105 } 105 }
@@ -114,7 +114,7 @@ static int is_block_bad(int ebnum)
114 114
115 ret = mtd_block_isbad(mtd, addr); 115 ret = mtd_block_isbad(mtd, addr);
116 if (ret) 116 if (ret)
117 printk(PRINT_PREF "block %d is bad\n", ebnum); 117 pr_info("block %d is bad\n", ebnum);
118 return ret; 118 return ret;
119} 119}
120 120
@@ -137,7 +137,7 @@ static int do_read(void)
137 if (mtd_is_bitflip(err)) 137 if (mtd_is_bitflip(err))
138 err = 0; 138 err = 0;
139 if (unlikely(err || read != len)) { 139 if (unlikely(err || read != len)) {
140 printk(PRINT_PREF "error: read failed at 0x%llx\n", 140 pr_err("error: read failed at 0x%llx\n",
141 (long long)addr); 141 (long long)addr);
142 if (!err) 142 if (!err)
143 err = -EINVAL; 143 err = -EINVAL;
@@ -174,7 +174,7 @@ static int do_write(void)
174 addr = eb * mtd->erasesize + offs; 174 addr = eb * mtd->erasesize + offs;
175 err = mtd_write(mtd, addr, len, &written, writebuf); 175 err = mtd_write(mtd, addr, len, &written, writebuf);
176 if (unlikely(err || written != len)) { 176 if (unlikely(err || written != len)) {
177 printk(PRINT_PREF "error: write failed at 0x%llx\n", 177 pr_err("error: write failed at 0x%llx\n",
178 (long long)addr); 178 (long long)addr);
179 if (!err) 179 if (!err)
180 err = -EINVAL; 180 err = -EINVAL;
@@ -203,21 +203,21 @@ static int scan_for_bad_eraseblocks(void)
203 203
204 bbt = kzalloc(ebcnt, GFP_KERNEL); 204 bbt = kzalloc(ebcnt, GFP_KERNEL);
205 if (!bbt) { 205 if (!bbt) {
206 printk(PRINT_PREF "error: cannot allocate memory\n"); 206 pr_err("error: cannot allocate memory\n");
207 return -ENOMEM; 207 return -ENOMEM;
208 } 208 }
209 209
210 if (!mtd_can_have_bb(mtd)) 210 if (!mtd_can_have_bb(mtd))
211 return 0; 211 return 0;
212 212
213 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 213 pr_info("scanning for bad eraseblocks\n");
214 for (i = 0; i < ebcnt; ++i) { 214 for (i = 0; i < ebcnt; ++i) {
215 bbt[i] = is_block_bad(i) ? 1 : 0; 215 bbt[i] = is_block_bad(i) ? 1 : 0;
216 if (bbt[i]) 216 if (bbt[i])
217 bad += 1; 217 bad += 1;
218 cond_resched(); 218 cond_resched();
219 } 219 }
220 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); 220 pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
221 return 0; 221 return 0;
222} 222}
223 223
@@ -231,22 +231,22 @@ static int __init mtd_stresstest_init(void)
231 printk(KERN_INFO "=================================================\n"); 231 printk(KERN_INFO "=================================================\n");
232 232
233 if (dev < 0) { 233 if (dev < 0) {
234 printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 234 pr_info("Please specify a valid mtd-device via module parameter\n");
235 printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 235 pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
236 return -EINVAL; 236 return -EINVAL;
237 } 237 }
238 238
239 printk(PRINT_PREF "MTD device: %d\n", dev); 239 pr_info("MTD device: %d\n", dev);
240 240
241 mtd = get_mtd_device(NULL, dev); 241 mtd = get_mtd_device(NULL, dev);
242 if (IS_ERR(mtd)) { 242 if (IS_ERR(mtd)) {
243 err = PTR_ERR(mtd); 243 err = PTR_ERR(mtd);
244 printk(PRINT_PREF "error: cannot get MTD device\n"); 244 pr_err("error: cannot get MTD device\n");
245 return err; 245 return err;
246 } 246 }
247 247
248 if (mtd->writesize == 1) { 248 if (mtd->writesize == 1) {
249 printk(PRINT_PREF "not NAND flash, assume page size is 512 " 249 pr_info("not NAND flash, assume page size is 512 "
250 "bytes.\n"); 250 "bytes.\n");
251 pgsize = 512; 251 pgsize = 512;
252 } else 252 } else
@@ -257,14 +257,14 @@ static int __init mtd_stresstest_init(void)
257 ebcnt = tmp; 257 ebcnt = tmp;
258 pgcnt = mtd->erasesize / pgsize; 258 pgcnt = mtd->erasesize / pgsize;
259 259
260 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 260 pr_info("MTD device size %llu, eraseblock size %u, "
261 "page size %u, count of eraseblocks %u, pages per " 261 "page size %u, count of eraseblocks %u, pages per "
262 "eraseblock %u, OOB size %u\n", 262 "eraseblock %u, OOB size %u\n",
263 (unsigned long long)mtd->size, mtd->erasesize, 263 (unsigned long long)mtd->size, mtd->erasesize,
264 pgsize, ebcnt, pgcnt, mtd->oobsize); 264 pgsize, ebcnt, pgcnt, mtd->oobsize);
265 265
266 if (ebcnt < 2) { 266 if (ebcnt < 2) {
267 printk(PRINT_PREF "error: need at least 2 eraseblocks\n"); 267 pr_err("error: need at least 2 eraseblocks\n");
268 err = -ENOSPC; 268 err = -ENOSPC;
269 goto out_put_mtd; 269 goto out_put_mtd;
270 } 270 }
@@ -277,7 +277,7 @@ static int __init mtd_stresstest_init(void)
277 writebuf = vmalloc(bufsize); 277 writebuf = vmalloc(bufsize);
278 offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL); 278 offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL);
279 if (!readbuf || !writebuf || !offsets) { 279 if (!readbuf || !writebuf || !offsets) {
280 printk(PRINT_PREF "error: cannot allocate memory\n"); 280 pr_err("error: cannot allocate memory\n");
281 goto out; 281 goto out;
282 } 282 }
283 for (i = 0; i < ebcnt; i++) 283 for (i = 0; i < ebcnt; i++)
@@ -290,16 +290,16 @@ static int __init mtd_stresstest_init(void)
290 goto out; 290 goto out;
291 291
292 /* Do operations */ 292 /* Do operations */
293 printk(PRINT_PREF "doing operations\n"); 293 pr_info("doing operations\n");
294 for (op = 0; op < count; op++) { 294 for (op = 0; op < count; op++) {
295 if ((op & 1023) == 0) 295 if ((op & 1023) == 0)
296 printk(PRINT_PREF "%d operations done\n", op); 296 pr_info("%d operations done\n", op);
297 err = do_operation(); 297 err = do_operation();
298 if (err) 298 if (err)
299 goto out; 299 goto out;
300 cond_resched(); 300 cond_resched();
301 } 301 }
302 printk(PRINT_PREF "finished, %d operations done\n", op); 302 pr_info("finished, %d operations done\n", op);
303 303
304out: 304out:
305 kfree(offsets); 305 kfree(offsets);
@@ -309,7 +309,7 @@ out:
309out_put_mtd: 309out_put_mtd:
310 put_mtd_device(mtd); 310 put_mtd_device(mtd);
311 if (err) 311 if (err)
312 printk(PRINT_PREF "error %d occurred\n", err); 312 pr_info("error %d occurred\n", err);
313 printk(KERN_INFO "=================================================\n"); 313 printk(KERN_INFO "=================================================\n");
314 return err; 314 return err;
315} 315}
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index 9667bf535282..c880c2229c59 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -19,6 +19,8 @@
19 * 19 *
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
22#include <linux/init.h> 24#include <linux/init.h>
23#include <linux/module.h> 25#include <linux/module.h>
24#include <linux/moduleparam.h> 26#include <linux/moduleparam.h>
@@ -27,8 +29,6 @@
27#include <linux/slab.h> 29#include <linux/slab.h>
28#include <linux/sched.h> 30#include <linux/sched.h>
29 31
30#define PRINT_PREF KERN_INFO "mtd_subpagetest: "
31
32static int dev = -EINVAL; 32static int dev = -EINVAL;
33module_param(dev, int, S_IRUGO); 33module_param(dev, int, S_IRUGO);
34MODULE_PARM_DESC(dev, "MTD device number to use"); 34MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -82,12 +82,12 @@ static int erase_eraseblock(int ebnum)
82 82
83 err = mtd_erase(mtd, &ei); 83 err = mtd_erase(mtd, &ei);
84 if (err) { 84 if (err) {
85 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 85 pr_err("error %d while erasing EB %d\n", err, ebnum);
86 return err; 86 return err;
87 } 87 }
88 88
89 if (ei.state == MTD_ERASE_FAILED) { 89 if (ei.state == MTD_ERASE_FAILED) {
90 printk(PRINT_PREF "some erase error occurred at EB %d\n", 90 pr_err("some erase error occurred at EB %d\n",
91 ebnum); 91 ebnum);
92 return -EIO; 92 return -EIO;
93 } 93 }
@@ -100,7 +100,7 @@ static int erase_whole_device(void)
100 int err; 100 int err;
101 unsigned int i; 101 unsigned int i;
102 102
103 printk(PRINT_PREF "erasing whole device\n"); 103 pr_info("erasing whole device\n");
104 for (i = 0; i < ebcnt; ++i) { 104 for (i = 0; i < ebcnt; ++i) {
105 if (bbt[i]) 105 if (bbt[i])
106 continue; 106 continue;
@@ -109,7 +109,7 @@ static int erase_whole_device(void)
109 return err; 109 return err;
110 cond_resched(); 110 cond_resched();
111 } 111 }
112 printk(PRINT_PREF "erased %u eraseblocks\n", i); 112 pr_info("erased %u eraseblocks\n", i);
113 return 0; 113 return 0;
114} 114}
115 115
@@ -122,11 +122,11 @@ static int write_eraseblock(int ebnum)
122 set_random_data(writebuf, subpgsize); 122 set_random_data(writebuf, subpgsize);
123 err = mtd_write(mtd, addr, subpgsize, &written, writebuf); 123 err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
124 if (unlikely(err || written != subpgsize)) { 124 if (unlikely(err || written != subpgsize)) {
125 printk(PRINT_PREF "error: write failed at %#llx\n", 125 pr_err("error: write failed at %#llx\n",
126 (long long)addr); 126 (long long)addr);
127 if (written != subpgsize) { 127 if (written != subpgsize) {
128 printk(PRINT_PREF " write size: %#x\n", subpgsize); 128 pr_err(" write size: %#x\n", subpgsize);
129 printk(PRINT_PREF " written: %#zx\n", written); 129 pr_err(" written: %#zx\n", written);
130 } 130 }
131 return err ? err : -1; 131 return err ? err : -1;
132 } 132 }
@@ -136,11 +136,11 @@ static int write_eraseblock(int ebnum)
136 set_random_data(writebuf, subpgsize); 136 set_random_data(writebuf, subpgsize);
137 err = mtd_write(mtd, addr, subpgsize, &written, writebuf); 137 err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
138 if (unlikely(err || written != subpgsize)) { 138 if (unlikely(err || written != subpgsize)) {
139 printk(PRINT_PREF "error: write failed at %#llx\n", 139 pr_err("error: write failed at %#llx\n",
140 (long long)addr); 140 (long long)addr);
141 if (written != subpgsize) { 141 if (written != subpgsize) {
142 printk(PRINT_PREF " write size: %#x\n", subpgsize); 142 pr_err(" write size: %#x\n", subpgsize);
143 printk(PRINT_PREF " written: %#zx\n", written); 143 pr_err(" written: %#zx\n", written);
144 } 144 }
145 return err ? err : -1; 145 return err ? err : -1;
146 } 146 }
@@ -160,12 +160,12 @@ static int write_eraseblock2(int ebnum)
160 set_random_data(writebuf, subpgsize * k); 160 set_random_data(writebuf, subpgsize * k);
161 err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf); 161 err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
162 if (unlikely(err || written != subpgsize * k)) { 162 if (unlikely(err || written != subpgsize * k)) {
163 printk(PRINT_PREF "error: write failed at %#llx\n", 163 pr_err("error: write failed at %#llx\n",
164 (long long)addr); 164 (long long)addr);
165 if (written != subpgsize) { 165 if (written != subpgsize) {
166 printk(PRINT_PREF " write size: %#x\n", 166 pr_err(" write size: %#x\n",
167 subpgsize * k); 167 subpgsize * k);
168 printk(PRINT_PREF " written: %#08zx\n", 168 pr_err(" written: %#08zx\n",
169 written); 169 written);
170 } 170 }
171 return err ? err : -1; 171 return err ? err : -1;
@@ -198,23 +198,23 @@ static int verify_eraseblock(int ebnum)
198 err = mtd_read(mtd, addr, subpgsize, &read, readbuf); 198 err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
199 if (unlikely(err || read != subpgsize)) { 199 if (unlikely(err || read != subpgsize)) {
200 if (mtd_is_bitflip(err) && read == subpgsize) { 200 if (mtd_is_bitflip(err) && read == subpgsize) {
201 printk(PRINT_PREF "ECC correction at %#llx\n", 201 pr_info("ECC correction at %#llx\n",
202 (long long)addr); 202 (long long)addr);
203 err = 0; 203 err = 0;
204 } else { 204 } else {
205 printk(PRINT_PREF "error: read failed at %#llx\n", 205 pr_err("error: read failed at %#llx\n",
206 (long long)addr); 206 (long long)addr);
207 return err ? err : -1; 207 return err ? err : -1;
208 } 208 }
209 } 209 }
210 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) { 210 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
211 printk(PRINT_PREF "error: verify failed at %#llx\n", 211 pr_err("error: verify failed at %#llx\n",
212 (long long)addr); 212 (long long)addr);
213 printk(PRINT_PREF "------------- written----------------\n"); 213 pr_info("------------- written----------------\n");
214 print_subpage(writebuf); 214 print_subpage(writebuf);
215 printk(PRINT_PREF "------------- read ------------------\n"); 215 pr_info("------------- read ------------------\n");
216 print_subpage(readbuf); 216 print_subpage(readbuf);
217 printk(PRINT_PREF "-------------------------------------\n"); 217 pr_info("-------------------------------------\n");
218 errcnt += 1; 218 errcnt += 1;
219 } 219 }
220 220
@@ -225,23 +225,23 @@ static int verify_eraseblock(int ebnum)
225 err = mtd_read(mtd, addr, subpgsize, &read, readbuf); 225 err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
226 if (unlikely(err || read != subpgsize)) { 226 if (unlikely(err || read != subpgsize)) {
227 if (mtd_is_bitflip(err) && read == subpgsize) { 227 if (mtd_is_bitflip(err) && read == subpgsize) {
228 printk(PRINT_PREF "ECC correction at %#llx\n", 228 pr_info("ECC correction at %#llx\n",
229 (long long)addr); 229 (long long)addr);
230 err = 0; 230 err = 0;
231 } else { 231 } else {
232 printk(PRINT_PREF "error: read failed at %#llx\n", 232 pr_err("error: read failed at %#llx\n",
233 (long long)addr); 233 (long long)addr);
234 return err ? err : -1; 234 return err ? err : -1;
235 } 235 }
236 } 236 }
237 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) { 237 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
238 printk(PRINT_PREF "error: verify failed at %#llx\n", 238 pr_info("error: verify failed at %#llx\n",
239 (long long)addr); 239 (long long)addr);
240 printk(PRINT_PREF "------------- written----------------\n"); 240 pr_info("------------- written----------------\n");
241 print_subpage(writebuf); 241 print_subpage(writebuf);
242 printk(PRINT_PREF "------------- read ------------------\n"); 242 pr_info("------------- read ------------------\n");
243 print_subpage(readbuf); 243 print_subpage(readbuf);
244 printk(PRINT_PREF "-------------------------------------\n"); 244 pr_info("-------------------------------------\n");
245 errcnt += 1; 245 errcnt += 1;
246 } 246 }
247 247
@@ -262,17 +262,17 @@ static int verify_eraseblock2(int ebnum)
262 err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf); 262 err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
263 if (unlikely(err || read != subpgsize * k)) { 263 if (unlikely(err || read != subpgsize * k)) {
264 if (mtd_is_bitflip(err) && read == subpgsize * k) { 264 if (mtd_is_bitflip(err) && read == subpgsize * k) {
265 printk(PRINT_PREF "ECC correction at %#llx\n", 265 pr_info("ECC correction at %#llx\n",
266 (long long)addr); 266 (long long)addr);
267 err = 0; 267 err = 0;
268 } else { 268 } else {
269 printk(PRINT_PREF "error: read failed at " 269 pr_err("error: read failed at "
270 "%#llx\n", (long long)addr); 270 "%#llx\n", (long long)addr);
271 return err ? err : -1; 271 return err ? err : -1;
272 } 272 }
273 } 273 }
274 if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) { 274 if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) {
275 printk(PRINT_PREF "error: verify failed at %#llx\n", 275 pr_err("error: verify failed at %#llx\n",
276 (long long)addr); 276 (long long)addr);
277 errcnt += 1; 277 errcnt += 1;
278 } 278 }
@@ -295,17 +295,17 @@ static int verify_eraseblock_ff(int ebnum)
295 err = mtd_read(mtd, addr, subpgsize, &read, readbuf); 295 err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
296 if (unlikely(err || read != subpgsize)) { 296 if (unlikely(err || read != subpgsize)) {
297 if (mtd_is_bitflip(err) && read == subpgsize) { 297 if (mtd_is_bitflip(err) && read == subpgsize) {
298 printk(PRINT_PREF "ECC correction at %#llx\n", 298 pr_info("ECC correction at %#llx\n",
299 (long long)addr); 299 (long long)addr);
300 err = 0; 300 err = 0;
301 } else { 301 } else {
302 printk(PRINT_PREF "error: read failed at " 302 pr_err("error: read failed at "
303 "%#llx\n", (long long)addr); 303 "%#llx\n", (long long)addr);
304 return err ? err : -1; 304 return err ? err : -1;
305 } 305 }
306 } 306 }
307 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) { 307 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
308 printk(PRINT_PREF "error: verify 0xff failed at " 308 pr_err("error: verify 0xff failed at "
309 "%#llx\n", (long long)addr); 309 "%#llx\n", (long long)addr);
310 errcnt += 1; 310 errcnt += 1;
311 } 311 }
@@ -320,7 +320,7 @@ static int verify_all_eraseblocks_ff(void)
320 int err; 320 int err;
321 unsigned int i; 321 unsigned int i;
322 322
323 printk(PRINT_PREF "verifying all eraseblocks for 0xff\n"); 323 pr_info("verifying all eraseblocks for 0xff\n");
324 for (i = 0; i < ebcnt; ++i) { 324 for (i = 0; i < ebcnt; ++i) {
325 if (bbt[i]) 325 if (bbt[i])
326 continue; 326 continue;
@@ -328,10 +328,10 @@ static int verify_all_eraseblocks_ff(void)
328 if (err) 328 if (err)
329 return err; 329 return err;
330 if (i % 256 == 0) 330 if (i % 256 == 0)
331 printk(PRINT_PREF "verified up to eraseblock %u\n", i); 331 pr_info("verified up to eraseblock %u\n", i);
332 cond_resched(); 332 cond_resched();
333 } 333 }
334 printk(PRINT_PREF "verified %u eraseblocks\n", i); 334 pr_info("verified %u eraseblocks\n", i);
335 return 0; 335 return 0;
336} 336}
337 337
@@ -342,7 +342,7 @@ static int is_block_bad(int ebnum)
342 342
343 ret = mtd_block_isbad(mtd, addr); 343 ret = mtd_block_isbad(mtd, addr);
344 if (ret) 344 if (ret)
345 printk(PRINT_PREF "block %d is bad\n", ebnum); 345 pr_info("block %d is bad\n", ebnum);
346 return ret; 346 return ret;
347} 347}
348 348
@@ -352,18 +352,18 @@ static int scan_for_bad_eraseblocks(void)
352 352
353 bbt = kzalloc(ebcnt, GFP_KERNEL); 353 bbt = kzalloc(ebcnt, GFP_KERNEL);
354 if (!bbt) { 354 if (!bbt) {
355 printk(PRINT_PREF "error: cannot allocate memory\n"); 355 pr_err("error: cannot allocate memory\n");
356 return -ENOMEM; 356 return -ENOMEM;
357 } 357 }
358 358
359 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 359 pr_info("scanning for bad eraseblocks\n");
360 for (i = 0; i < ebcnt; ++i) { 360 for (i = 0; i < ebcnt; ++i) {
361 bbt[i] = is_block_bad(i) ? 1 : 0; 361 bbt[i] = is_block_bad(i) ? 1 : 0;
362 if (bbt[i]) 362 if (bbt[i])
363 bad += 1; 363 bad += 1;
364 cond_resched(); 364 cond_resched();
365 } 365 }
366 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); 366 pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
367 return 0; 367 return 0;
368} 368}
369 369
@@ -377,22 +377,22 @@ static int __init mtd_subpagetest_init(void)
377 printk(KERN_INFO "=================================================\n"); 377 printk(KERN_INFO "=================================================\n");
378 378
379 if (dev < 0) { 379 if (dev < 0) {
380 printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 380 pr_info("Please specify a valid mtd-device via module parameter\n");
381 printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 381 pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
382 return -EINVAL; 382 return -EINVAL;
383 } 383 }
384 384
385 printk(PRINT_PREF "MTD device: %d\n", dev); 385 pr_info("MTD device: %d\n", dev);
386 386
387 mtd = get_mtd_device(NULL, dev); 387 mtd = get_mtd_device(NULL, dev);
388 if (IS_ERR(mtd)) { 388 if (IS_ERR(mtd)) {
389 err = PTR_ERR(mtd); 389 err = PTR_ERR(mtd);
390 printk(PRINT_PREF "error: cannot get MTD device\n"); 390 pr_err("error: cannot get MTD device\n");
391 return err; 391 return err;
392 } 392 }
393 393
394 if (mtd->type != MTD_NANDFLASH) { 394 if (mtd->type != MTD_NANDFLASH) {
395 printk(PRINT_PREF "this test requires NAND flash\n"); 395 pr_info("this test requires NAND flash\n");
396 goto out; 396 goto out;
397 } 397 }
398 398
@@ -402,7 +402,7 @@ static int __init mtd_subpagetest_init(void)
402 ebcnt = tmp; 402 ebcnt = tmp;
403 pgcnt = mtd->erasesize / mtd->writesize; 403 pgcnt = mtd->erasesize / mtd->writesize;
404 404
405 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 405 pr_info("MTD device size %llu, eraseblock size %u, "
406 "page size %u, subpage size %u, count of eraseblocks %u, " 406 "page size %u, subpage size %u, count of eraseblocks %u, "
407 "pages per eraseblock %u, OOB size %u\n", 407 "pages per eraseblock %u, OOB size %u\n",
408 (unsigned long long)mtd->size, mtd->erasesize, 408 (unsigned long long)mtd->size, mtd->erasesize,
@@ -412,12 +412,12 @@ static int __init mtd_subpagetest_init(void)
412 bufsize = subpgsize * 32; 412 bufsize = subpgsize * 32;
413 writebuf = kmalloc(bufsize, GFP_KERNEL); 413 writebuf = kmalloc(bufsize, GFP_KERNEL);
414 if (!writebuf) { 414 if (!writebuf) {
415 printk(PRINT_PREF "error: cannot allocate memory\n"); 415 pr_info("error: cannot allocate memory\n");
416 goto out; 416 goto out;
417 } 417 }
418 readbuf = kmalloc(bufsize, GFP_KERNEL); 418 readbuf = kmalloc(bufsize, GFP_KERNEL);
419 if (!readbuf) { 419 if (!readbuf) {
420 printk(PRINT_PREF "error: cannot allocate memory\n"); 420 pr_info("error: cannot allocate memory\n");
421 goto out; 421 goto out;
422 } 422 }
423 423
@@ -429,7 +429,7 @@ static int __init mtd_subpagetest_init(void)
429 if (err) 429 if (err)
430 goto out; 430 goto out;
431 431
432 printk(PRINT_PREF "writing whole device\n"); 432 pr_info("writing whole device\n");
433 simple_srand(1); 433 simple_srand(1);
434 for (i = 0; i < ebcnt; ++i) { 434 for (i = 0; i < ebcnt; ++i) {
435 if (bbt[i]) 435 if (bbt[i])
@@ -438,13 +438,13 @@ static int __init mtd_subpagetest_init(void)
438 if (unlikely(err)) 438 if (unlikely(err))
439 goto out; 439 goto out;
440 if (i % 256 == 0) 440 if (i % 256 == 0)
441 printk(PRINT_PREF "written up to eraseblock %u\n", i); 441 pr_info("written up to eraseblock %u\n", i);
442 cond_resched(); 442 cond_resched();
443 } 443 }
444 printk(PRINT_PREF "written %u eraseblocks\n", i); 444 pr_info("written %u eraseblocks\n", i);
445 445
446 simple_srand(1); 446 simple_srand(1);
447 printk(PRINT_PREF "verifying all eraseblocks\n"); 447 pr_info("verifying all eraseblocks\n");
448 for (i = 0; i < ebcnt; ++i) { 448 for (i = 0; i < ebcnt; ++i) {
449 if (bbt[i]) 449 if (bbt[i])
450 continue; 450 continue;
@@ -452,10 +452,10 @@ static int __init mtd_subpagetest_init(void)
452 if (unlikely(err)) 452 if (unlikely(err))
453 goto out; 453 goto out;
454 if (i % 256 == 0) 454 if (i % 256 == 0)
455 printk(PRINT_PREF "verified up to eraseblock %u\n", i); 455 pr_info("verified up to eraseblock %u\n", i);
456 cond_resched(); 456 cond_resched();
457 } 457 }
458 printk(PRINT_PREF "verified %u eraseblocks\n", i); 458 pr_info("verified %u eraseblocks\n", i);
459 459
460 err = erase_whole_device(); 460 err = erase_whole_device();
461 if (err) 461 if (err)
@@ -467,7 +467,7 @@ static int __init mtd_subpagetest_init(void)
467 467
468 /* Write all eraseblocks */ 468 /* Write all eraseblocks */
469 simple_srand(3); 469 simple_srand(3);
470 printk(PRINT_PREF "writing whole device\n"); 470 pr_info("writing whole device\n");
471 for (i = 0; i < ebcnt; ++i) { 471 for (i = 0; i < ebcnt; ++i) {
472 if (bbt[i]) 472 if (bbt[i])
473 continue; 473 continue;
@@ -475,14 +475,14 @@ static int __init mtd_subpagetest_init(void)
475 if (unlikely(err)) 475 if (unlikely(err))
476 goto out; 476 goto out;
477 if (i % 256 == 0) 477 if (i % 256 == 0)
478 printk(PRINT_PREF "written up to eraseblock %u\n", i); 478 pr_info("written up to eraseblock %u\n", i);
479 cond_resched(); 479 cond_resched();
480 } 480 }
481 printk(PRINT_PREF "written %u eraseblocks\n", i); 481 pr_info("written %u eraseblocks\n", i);
482 482
483 /* Check all eraseblocks */ 483 /* Check all eraseblocks */
484 simple_srand(3); 484 simple_srand(3);
485 printk(PRINT_PREF "verifying all eraseblocks\n"); 485 pr_info("verifying all eraseblocks\n");
486 for (i = 0; i < ebcnt; ++i) { 486 for (i = 0; i < ebcnt; ++i) {
487 if (bbt[i]) 487 if (bbt[i])
488 continue; 488 continue;
@@ -490,10 +490,10 @@ static int __init mtd_subpagetest_init(void)
490 if (unlikely(err)) 490 if (unlikely(err))
491 goto out; 491 goto out;
492 if (i % 256 == 0) 492 if (i % 256 == 0)
493 printk(PRINT_PREF "verified up to eraseblock %u\n", i); 493 pr_info("verified up to eraseblock %u\n", i);
494 cond_resched(); 494 cond_resched();
495 } 495 }
496 printk(PRINT_PREF "verified %u eraseblocks\n", i); 496 pr_info("verified %u eraseblocks\n", i);
497 497
498 err = erase_whole_device(); 498 err = erase_whole_device();
499 if (err) 499 if (err)
@@ -503,7 +503,7 @@ static int __init mtd_subpagetest_init(void)
503 if (err) 503 if (err)
504 goto out; 504 goto out;
505 505
506 printk(PRINT_PREF "finished with %d errors\n", errcnt); 506 pr_info("finished with %d errors\n", errcnt);
507 507
508out: 508out:
509 kfree(bbt); 509 kfree(bbt);
@@ -511,7 +511,7 @@ out:
511 kfree(writebuf); 511 kfree(writebuf);
512 put_mtd_device(mtd); 512 put_mtd_device(mtd);
513 if (err) 513 if (err)
514 printk(PRINT_PREF "error %d occurred\n", err); 514 pr_info("error %d occurred\n", err);
515 printk(KERN_INFO "=================================================\n"); 515 printk(KERN_INFO "=================================================\n");
516 return err; 516 return err;
517} 517}
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index b65861bc7b8e..c4cde1e9eddb 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -23,6 +23,8 @@
23 * damage caused by this program. 23 * damage caused by this program.
24 */ 24 */
25 25
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
26#include <linux/init.h> 28#include <linux/init.h>
27#include <linux/module.h> 29#include <linux/module.h>
28#include <linux/moduleparam.h> 30#include <linux/moduleparam.h>
@@ -31,7 +33,6 @@
31#include <linux/slab.h> 33#include <linux/slab.h>
32#include <linux/sched.h> 34#include <linux/sched.h>
33 35
34#define PRINT_PREF KERN_INFO "mtd_torturetest: "
35#define RETRIES 3 36#define RETRIES 3
36 37
37static int eb = 8; 38static int eb = 8;
@@ -107,12 +108,12 @@ static inline int erase_eraseblock(int ebnum)
107 108
108 err = mtd_erase(mtd, &ei); 109 err = mtd_erase(mtd, &ei);
109 if (err) { 110 if (err) {
110 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum); 111 pr_err("error %d while erasing EB %d\n", err, ebnum);
111 return err; 112 return err;
112 } 113 }
113 114
114 if (ei.state == MTD_ERASE_FAILED) { 115 if (ei.state == MTD_ERASE_FAILED) {
115 printk(PRINT_PREF "some erase error occurred at EB %d\n", 116 pr_err("some erase error occurred at EB %d\n",
116 ebnum); 117 ebnum);
117 return -EIO; 118 return -EIO;
118 } 119 }
@@ -139,40 +140,40 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf)
139retry: 140retry:
140 err = mtd_read(mtd, addr, len, &read, check_buf); 141 err = mtd_read(mtd, addr, len, &read, check_buf);
141 if (mtd_is_bitflip(err)) 142 if (mtd_is_bitflip(err))
142 printk(PRINT_PREF "single bit flip occurred at EB %d " 143 pr_err("single bit flip occurred at EB %d "
143 "MTD reported that it was fixed.\n", ebnum); 144 "MTD reported that it was fixed.\n", ebnum);
144 else if (err) { 145 else if (err) {
145 printk(PRINT_PREF "error %d while reading EB %d, " 146 pr_err("error %d while reading EB %d, "
146 "read %zd\n", err, ebnum, read); 147 "read %zd\n", err, ebnum, read);
147 return err; 148 return err;
148 } 149 }
149 150
150 if (read != len) { 151 if (read != len) {
151 printk(PRINT_PREF "failed to read %zd bytes from EB %d, " 152 pr_err("failed to read %zd bytes from EB %d, "
152 "read only %zd, but no error reported\n", 153 "read only %zd, but no error reported\n",
153 len, ebnum, read); 154 len, ebnum, read);
154 return -EIO; 155 return -EIO;
155 } 156 }
156 157
157 if (memcmp(buf, check_buf, len)) { 158 if (memcmp(buf, check_buf, len)) {
158 printk(PRINT_PREF "read wrong data from EB %d\n", ebnum); 159 pr_err("read wrong data from EB %d\n", ebnum);
159 report_corrupt(check_buf, buf); 160 report_corrupt(check_buf, buf);
160 161
161 if (retries++ < RETRIES) { 162 if (retries++ < RETRIES) {
162 /* Try read again */ 163 /* Try read again */
163 yield(); 164 yield();
164 printk(PRINT_PREF "re-try reading data from EB %d\n", 165 pr_info("re-try reading data from EB %d\n",
165 ebnum); 166 ebnum);
166 goto retry; 167 goto retry;
167 } else { 168 } else {
168 printk(PRINT_PREF "retried %d times, still errors, " 169 pr_info("retried %d times, still errors, "
169 "give-up\n", RETRIES); 170 "give-up\n", RETRIES);
170 return -EINVAL; 171 return -EINVAL;
171 } 172 }
172 } 173 }
173 174
174 if (retries != 0) 175 if (retries != 0)
175 printk(PRINT_PREF "only attempt number %d was OK (!!!)\n", 176 pr_info("only attempt number %d was OK (!!!)\n",
176 retries); 177 retries);
177 178
178 return 0; 179 return 0;
@@ -191,12 +192,12 @@ static inline int write_pattern(int ebnum, void *buf)
191 } 192 }
192 err = mtd_write(mtd, addr, len, &written, buf); 193 err = mtd_write(mtd, addr, len, &written, buf);
193 if (err) { 194 if (err) {
194 printk(PRINT_PREF "error %d while writing EB %d, written %zd" 195 pr_err("error %d while writing EB %d, written %zd"
195 " bytes\n", err, ebnum, written); 196 " bytes\n", err, ebnum, written);
196 return err; 197 return err;
197 } 198 }
198 if (written != len) { 199 if (written != len) {
199 printk(PRINT_PREF "written only %zd bytes of %zd, but no error" 200 pr_info("written only %zd bytes of %zd, but no error"
200 " reported\n", written, len); 201 " reported\n", written, len);
201 return -EIO; 202 return -EIO;
202 } 203 }
@@ -211,64 +212,64 @@ static int __init tort_init(void)
211 212
212 printk(KERN_INFO "\n"); 213 printk(KERN_INFO "\n");
213 printk(KERN_INFO "=================================================\n"); 214 printk(KERN_INFO "=================================================\n");
214 printk(PRINT_PREF "Warning: this program is trying to wear out your " 215 pr_info("Warning: this program is trying to wear out your "
215 "flash, stop it if this is not wanted.\n"); 216 "flash, stop it if this is not wanted.\n");
216 217
217 if (dev < 0) { 218 if (dev < 0) {
218 printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); 219 pr_info("Please specify a valid mtd-device via module parameter\n");
219 printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); 220 pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
220 return -EINVAL; 221 return -EINVAL;
221 } 222 }
222 223
223 printk(PRINT_PREF "MTD device: %d\n", dev); 224 pr_info("MTD device: %d\n", dev);
224 printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n", 225 pr_info("torture %d eraseblocks (%d-%d) of mtd%d\n",
225 ebcnt, eb, eb + ebcnt - 1, dev); 226 ebcnt, eb, eb + ebcnt - 1, dev);
226 if (pgcnt) 227 if (pgcnt)
227 printk(PRINT_PREF "torturing just %d pages per eraseblock\n", 228 pr_info("torturing just %d pages per eraseblock\n",
228 pgcnt); 229 pgcnt);
229 printk(PRINT_PREF "write verify %s\n", check ? "enabled" : "disabled"); 230 pr_info("write verify %s\n", check ? "enabled" : "disabled");
230 231
231 mtd = get_mtd_device(NULL, dev); 232 mtd = get_mtd_device(NULL, dev);
232 if (IS_ERR(mtd)) { 233 if (IS_ERR(mtd)) {
233 err = PTR_ERR(mtd); 234 err = PTR_ERR(mtd);
234 printk(PRINT_PREF "error: cannot get MTD device\n"); 235 pr_err("error: cannot get MTD device\n");
235 return err; 236 return err;
236 } 237 }
237 238
238 if (mtd->writesize == 1) { 239 if (mtd->writesize == 1) {
239 printk(PRINT_PREF "not NAND flash, assume page size is 512 " 240 pr_info("not NAND flash, assume page size is 512 "
240 "bytes.\n"); 241 "bytes.\n");
241 pgsize = 512; 242 pgsize = 512;
242 } else 243 } else
243 pgsize = mtd->writesize; 244 pgsize = mtd->writesize;
244 245
245 if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) { 246 if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {
246 printk(PRINT_PREF "error: invalid pgcnt value %d\n", pgcnt); 247 pr_err("error: invalid pgcnt value %d\n", pgcnt);
247 goto out_mtd; 248 goto out_mtd;
248 } 249 }
249 250
250 err = -ENOMEM; 251 err = -ENOMEM;
251 patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL); 252 patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
252 if (!patt_5A5) { 253 if (!patt_5A5) {
253 printk(PRINT_PREF "error: cannot allocate memory\n"); 254 pr_err("error: cannot allocate memory\n");
254 goto out_mtd; 255 goto out_mtd;
255 } 256 }
256 257
257 patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL); 258 patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
258 if (!patt_A5A) { 259 if (!patt_A5A) {
259 printk(PRINT_PREF "error: cannot allocate memory\n"); 260 pr_err("error: cannot allocate memory\n");
260 goto out_patt_5A5; 261 goto out_patt_5A5;
261 } 262 }
262 263
263 patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL); 264 patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
264 if (!patt_FF) { 265 if (!patt_FF) {
265 printk(PRINT_PREF "error: cannot allocate memory\n"); 266 pr_err("error: cannot allocate memory\n");
266 goto out_patt_A5A; 267 goto out_patt_A5A;
267 } 268 }
268 269
269 check_buf = kmalloc(mtd->erasesize, GFP_KERNEL); 270 check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
270 if (!check_buf) { 271 if (!check_buf) {
271 printk(PRINT_PREF "error: cannot allocate memory\n"); 272 pr_err("error: cannot allocate memory\n");
272 goto out_patt_FF; 273 goto out_patt_FF;
273 } 274 }
274 275
@@ -295,13 +296,13 @@ static int __init tort_init(void)
295 err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize); 296 err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
296 297
297 if (err < 0) { 298 if (err < 0) {
298 printk(PRINT_PREF "block_isbad() returned %d " 299 pr_info("block_isbad() returned %d "
299 "for EB %d\n", err, i); 300 "for EB %d\n", err, i);
300 goto out; 301 goto out;
301 } 302 }
302 303
303 if (err) { 304 if (err) {
304 printk("EB %d is bad. Skip it.\n", i); 305 pr_err("EB %d is bad. Skip it.\n", i);
305 bad_ebs[i - eb] = 1; 306 bad_ebs[i - eb] = 1;
306 } 307 }
307 } 308 }
@@ -329,7 +330,7 @@ static int __init tort_init(void)
329 continue; 330 continue;
330 err = check_eraseblock(i, patt_FF); 331 err = check_eraseblock(i, patt_FF);
331 if (err) { 332 if (err) {
332 printk(PRINT_PREF "verify failed" 333 pr_info("verify failed"
333 " for 0xFF... pattern\n"); 334 " for 0xFF... pattern\n");
334 goto out; 335 goto out;
335 } 336 }
@@ -362,7 +363,7 @@ static int __init tort_init(void)
362 patt = patt_A5A; 363 patt = patt_A5A;
363 err = check_eraseblock(i, patt); 364 err = check_eraseblock(i, patt);
364 if (err) { 365 if (err) {
365 printk(PRINT_PREF "verify failed for %s" 366 pr_info("verify failed for %s"
366 " pattern\n", 367 " pattern\n",
367 ((eb + erase_cycles) & 1) ? 368 ((eb + erase_cycles) & 1) ?
368 "0x55AA55..." : "0xAA55AA..."); 369 "0x55AA55..." : "0xAA55AA...");
@@ -380,7 +381,7 @@ static int __init tort_init(void)
380 stop_timing(); 381 stop_timing();
381 ms = (finish.tv_sec - start.tv_sec) * 1000 + 382 ms = (finish.tv_sec - start.tv_sec) * 1000 +
382 (finish.tv_usec - start.tv_usec) / 1000; 383 (finish.tv_usec - start.tv_usec) / 1000;
383 printk(PRINT_PREF "%08u erase cycles done, took %lu " 384 pr_info("%08u erase cycles done, took %lu "
384 "milliseconds (%lu seconds)\n", 385 "milliseconds (%lu seconds)\n",
385 erase_cycles, ms, ms / 1000); 386 erase_cycles, ms, ms / 1000);
386 start_timing(); 387 start_timing();
@@ -391,7 +392,7 @@ static int __init tort_init(void)
391 } 392 }
392out: 393out:
393 394
394 printk(PRINT_PREF "finished after %u erase cycles\n", 395 pr_info("finished after %u erase cycles\n",
395 erase_cycles); 396 erase_cycles);
396 kfree(check_buf); 397 kfree(check_buf);
397out_patt_FF: 398out_patt_FF:
@@ -403,7 +404,7 @@ out_patt_5A5:
403out_mtd: 404out_mtd:
404 put_mtd_device(mtd); 405 put_mtd_device(mtd);
405 if (err) 406 if (err)
406 printk(PRINT_PREF "error %d occurred during torturing\n", err); 407 pr_info("error %d occurred during torturing\n", err);
407 printk(KERN_INFO "=================================================\n"); 408 printk(KERN_INFO "=================================================\n");
408 return err; 409 return err;
409} 410}
@@ -441,9 +442,9 @@ static void report_corrupt(unsigned char *read, unsigned char *written)
441 &bits) >= 0) 442 &bits) >= 0)
442 pages++; 443 pages++;
443 444
444 printk(PRINT_PREF "verify fails on %d pages, %d bytes/%d bits\n", 445 pr_info("verify fails on %d pages, %d bytes/%d bits\n",
445 pages, bytes, bits); 446 pages, bytes, bits);
446 printk(PRINT_PREF "The following is a list of all differences between" 447 pr_info("The following is a list of all differences between"
447 " what was read from flash and what was expected\n"); 448 " what was read from flash and what was expected\n");
448 449
449 for (i = 0; i < check_len; i += pgsize) { 450 for (i = 0; i < check_len; i += pgsize) {
@@ -457,7 +458,7 @@ static void report_corrupt(unsigned char *read, unsigned char *written)
457 printk("-------------------------------------------------------" 458 printk("-------------------------------------------------------"
458 "----------------------------------\n"); 459 "----------------------------------\n");
459 460
460 printk(PRINT_PREF "Page %zd has %d bytes/%d bits failing verify," 461 pr_info("Page %zd has %d bytes/%d bits failing verify,"
461 " starting at offset 0x%x\n", 462 " starting at offset 0x%x\n",
462 (mtd->erasesize - check_len + i) / pgsize, 463 (mtd->erasesize - check_len + i) / pgsize,
463 bytes, bits, first); 464 bytes, bits, first);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ef2cb2418535..b7d45f367d4a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4431,8 +4431,6 @@ static void bond_uninit(struct net_device *bond_dev)
4431 4431
4432 list_del(&bond->bond_list); 4432 list_del(&bond->bond_list);
4433 4433
4434 bond_work_cancel_all(bond);
4435
4436 bond_debug_unregister(bond); 4434 bond_debug_unregister(bond);
4437 4435
4438 __hw_addr_flush(&bond->mc_list); 4436 __hw_addr_flush(&bond->mc_list);
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 0f5917000aa2..6433b81256cd 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -121,7 +121,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
121 } 121 }
122 122
123 irq = irq_of_parse_and_map(np, 0); 123 irq = irq_of_parse_and_map(np, 0);
124 if (irq == NO_IRQ) { 124 if (irq == 0) {
125 dev_err(&ofdev->dev, "no irq found\n"); 125 dev_err(&ofdev->dev, "no irq found\n");
126 err = -ENODEV; 126 err = -ENODEV;
127 goto exit_unmap_mem; 127 goto exit_unmap_mem;
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index abf26c7c1d19..3bc1912afba9 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -616,7 +616,7 @@ static inline bool be_error(struct be_adapter *adapter)
616 return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout; 616 return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
617} 617}
618 618
619static inline bool be_crit_error(struct be_adapter *adapter) 619static inline bool be_hw_error(struct be_adapter *adapter)
620{ 620{
621 return adapter->eeh_error || adapter->hw_error; 621 return adapter->eeh_error || adapter->hw_error;
622} 622}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index f2875aa47661..8a250c38fb82 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -298,7 +298,12 @@ void be_async_mcc_enable(struct be_adapter *adapter)
298 298
299void be_async_mcc_disable(struct be_adapter *adapter) 299void be_async_mcc_disable(struct be_adapter *adapter)
300{ 300{
301 spin_lock_bh(&adapter->mcc_cq_lock);
302
301 adapter->mcc_obj.rearm_cq = false; 303 adapter->mcc_obj.rearm_cq = false;
304 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
305
306 spin_unlock_bh(&adapter->mcc_cq_lock);
302} 307}
303 308
304int be_process_mcc(struct be_adapter *adapter) 309int be_process_mcc(struct be_adapter *adapter)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index f95612b907ae..9dca22be8125 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1689,15 +1689,41 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
1689 struct be_queue_info *rxq = &rxo->q; 1689 struct be_queue_info *rxq = &rxo->q;
1690 struct be_queue_info *rx_cq = &rxo->cq; 1690 struct be_queue_info *rx_cq = &rxo->cq;
1691 struct be_rx_compl_info *rxcp; 1691 struct be_rx_compl_info *rxcp;
1692 struct be_adapter *adapter = rxo->adapter;
1693 int flush_wait = 0;
1692 u16 tail; 1694 u16 tail;
1693 1695
1694 /* First cleanup pending rx completions */ 1696 /* Consume pending rx completions.
1695 while ((rxcp = be_rx_compl_get(rxo)) != NULL) { 1697 * Wait for the flush completion (identified by zero num_rcvd)
1696 be_rx_compl_discard(rxo, rxcp); 1698 * to arrive. Notify CQ even when there are no more CQ entries
1697 be_cq_notify(rxo->adapter, rx_cq->id, false, 1); 1699 * for HW to flush partially coalesced CQ entries.
1700 * In Lancer, there is no need to wait for flush compl.
1701 */
1702 for (;;) {
1703 rxcp = be_rx_compl_get(rxo);
1704 if (rxcp == NULL) {
1705 if (lancer_chip(adapter))
1706 break;
1707
1708 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1709 dev_warn(&adapter->pdev->dev,
1710 "did not receive flush compl\n");
1711 break;
1712 }
1713 be_cq_notify(adapter, rx_cq->id, true, 0);
1714 mdelay(1);
1715 } else {
1716 be_rx_compl_discard(rxo, rxcp);
1717 be_cq_notify(adapter, rx_cq->id, true, 1);
1718 if (rxcp->num_rcvd == 0)
1719 break;
1720 }
1698 } 1721 }
1699 1722
1700 /* Then free posted rx buffer that were not used */ 1723 /* After cleanup, leave the CQ in unarmed state */
1724 be_cq_notify(adapter, rx_cq->id, false, 0);
1725
1726 /* Then free posted rx buffers that were not used */
1701 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len; 1727 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1702 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) { 1728 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1703 page_info = get_rx_page_info(rxo, tail); 1729 page_info = get_rx_page_info(rxo, tail);
@@ -2157,7 +2183,7 @@ void be_detect_error(struct be_adapter *adapter)
2157 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; 2183 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2158 u32 i; 2184 u32 i;
2159 2185
2160 if (be_crit_error(adapter)) 2186 if (be_hw_error(adapter))
2161 return; 2187 return;
2162 2188
2163 if (lancer_chip(adapter)) { 2189 if (lancer_chip(adapter)) {
@@ -2398,13 +2424,22 @@ static int be_close(struct net_device *netdev)
2398 2424
2399 be_roce_dev_close(adapter); 2425 be_roce_dev_close(adapter);
2400 2426
2401 be_async_mcc_disable(adapter);
2402
2403 if (!lancer_chip(adapter)) 2427 if (!lancer_chip(adapter))
2404 be_intr_set(adapter, false); 2428 be_intr_set(adapter, false);
2405 2429
2406 for_all_evt_queues(adapter, eqo, i) { 2430 for_all_evt_queues(adapter, eqo, i)
2407 napi_disable(&eqo->napi); 2431 napi_disable(&eqo->napi);
2432
2433 be_async_mcc_disable(adapter);
2434
2435 /* Wait for all pending tx completions to arrive so that
2436 * all tx skbs are freed.
2437 */
2438 be_tx_compl_clean(adapter);
2439
2440 be_rx_qs_destroy(adapter);
2441
2442 for_all_evt_queues(adapter, eqo, i) {
2408 if (msix_enabled(adapter)) 2443 if (msix_enabled(adapter))
2409 synchronize_irq(be_msix_vec_get(adapter, eqo)); 2444 synchronize_irq(be_msix_vec_get(adapter, eqo));
2410 else 2445 else
@@ -2414,12 +2449,6 @@ static int be_close(struct net_device *netdev)
2414 2449
2415 be_irq_unregister(adapter); 2450 be_irq_unregister(adapter);
2416 2451
2417 /* Wait for all pending tx completions to arrive so that
2418 * all tx skbs are freed.
2419 */
2420 be_tx_compl_clean(adapter);
2421
2422 be_rx_qs_destroy(adapter);
2423 return 0; 2452 return 0;
2424} 2453}
2425 2454
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 5ba6e1cbd346..ec490d741fc0 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -94,9 +94,8 @@ config GIANFAR
94 94
95config FEC_PTP 95config FEC_PTP
96 bool "PTP Hardware Clock (PHC)" 96 bool "PTP Hardware Clock (PHC)"
97 depends on FEC && ARCH_MXC 97 depends on FEC && ARCH_MXC && !SOC_IMX25 && !SOC_IMX27 && !SOC_IMX35 && !SOC_IMX5
98 select PTP_1588_CLOCK 98 select PTP_1588_CLOCK
99 default y if SOC_IMX6Q
100 --help--- 99 --help---
101 Say Y here if you want to use PTP Hardware Clock (PHC) in the 100 Say Y here if you want to use PTP Hardware Clock (PHC) in the
102 driver. Only the basic clock operations have been implemented. 101 driver. Only the basic clock operations have been implemented.
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 83f0ea929d3d..8ebc352bcbe6 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4761,7 +4761,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
4761 struct ksz_dma_buf *dma_buf; 4761 struct ksz_dma_buf *dma_buf;
4762 struct net_device *dev = NULL; 4762 struct net_device *dev = NULL;
4763 4763
4764 spin_lock(&hw_priv->hwlock); 4764 spin_lock_irq(&hw_priv->hwlock);
4765 last = info->last; 4765 last = info->last;
4766 4766
4767 while (info->avail < info->alloc) { 4767 while (info->avail < info->alloc) {
@@ -4795,7 +4795,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
4795 info->avail++; 4795 info->avail++;
4796 } 4796 }
4797 info->last = last; 4797 info->last = last;
4798 spin_unlock(&hw_priv->hwlock); 4798 spin_unlock_irq(&hw_priv->hwlock);
4799 4799
4800 /* Notify the network subsystem that the packet has been sent. */ 4800 /* Notify the network subsystem that the packet has been sent. */
4801 if (dev) 4801 if (dev)
@@ -5259,11 +5259,15 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
5259 struct dev_info *hw_priv = priv->adapter; 5259 struct dev_info *hw_priv = priv->adapter;
5260 struct ksz_hw *hw = &hw_priv->hw; 5260 struct ksz_hw *hw = &hw_priv->hw;
5261 5261
5262 spin_lock(&hw_priv->hwlock);
5263
5262 hw_read_intr(hw, &int_enable); 5264 hw_read_intr(hw, &int_enable);
5263 5265
5264 /* Not our interrupt! */ 5266 /* Not our interrupt! */
5265 if (!int_enable) 5267 if (!int_enable) {
5268 spin_unlock(&hw_priv->hwlock);
5266 return IRQ_NONE; 5269 return IRQ_NONE;
5270 }
5267 5271
5268 do { 5272 do {
5269 hw_ack_intr(hw, int_enable); 5273 hw_ack_intr(hw, int_enable);
@@ -5310,6 +5314,8 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
5310 5314
5311 hw_ena_intr(hw); 5315 hw_ena_intr(hw);
5312 5316
5317 spin_unlock(&hw_priv->hwlock);
5318
5313 return IRQ_HANDLED; 5319 return IRQ_HANDLED;
5314} 5320}
5315 5321
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 537902479689..bc7ec64e9c7a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
36 36
37#define _QLCNIC_LINUX_MAJOR 5 37#define _QLCNIC_LINUX_MAJOR 5
38#define _QLCNIC_LINUX_MINOR 0 38#define _QLCNIC_LINUX_MINOR 0
39#define _QLCNIC_LINUX_SUBVERSION 29 39#define _QLCNIC_LINUX_SUBVERSION 30
40#define QLCNIC_LINUX_VERSIONID "5.0.29" 40#define QLCNIC_LINUX_VERSIONID "5.0.30"
41#define QLCNIC_DRV_IDC_VER 0x01 41#define QLCNIC_DRV_IDC_VER 0x01
42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 58f094ca052e..b14b8f0787ea 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -134,7 +134,7 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
134 __le32 *tmp_buf; 134 __le32 *tmp_buf;
135 struct qlcnic_cmd_args cmd; 135 struct qlcnic_cmd_args cmd;
136 struct qlcnic_hardware_context *ahw; 136 struct qlcnic_hardware_context *ahw;
137 struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl; 137 struct qlcnic_dump_template_hdr *tmpl_hdr;
138 dma_addr_t tmp_addr_t = 0; 138 dma_addr_t tmp_addr_t = 0;
139 139
140 ahw = adapter->ahw; 140 ahw = adapter->ahw;
@@ -150,6 +150,8 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
150 } 150 }
151 temp_size = cmd.rsp.arg2; 151 temp_size = cmd.rsp.arg2;
152 version = cmd.rsp.arg3; 152 version = cmd.rsp.arg3;
153 dev_info(&adapter->pdev->dev,
154 "minidump template version = 0x%x", version);
153 if (!temp_size) 155 if (!temp_size)
154 return -EIO; 156 return -EIO;
155 157
@@ -174,7 +176,6 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
174 err = -EIO; 176 err = -EIO;
175 goto error; 177 goto error;
176 } 178 }
177 tmp_tmpl = tmp_addr;
178 ahw->fw_dump.tmpl_hdr = vzalloc(temp_size); 179 ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
179 if (!ahw->fw_dump.tmpl_hdr) { 180 if (!ahw->fw_dump.tmpl_hdr) {
180 err = -EIO; 181 err = -EIO;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index fc48e000f35f..7a6d5ebe4e0f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -365,7 +365,7 @@ static int
365qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter, 365qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
366 struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) 366 struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
367{ 367{
368 u32 i, producer, consumer; 368 u32 i, producer;
369 struct qlcnic_cmd_buffer *pbuf; 369 struct qlcnic_cmd_buffer *pbuf;
370 struct cmd_desc_type0 *cmd_desc; 370 struct cmd_desc_type0 *cmd_desc;
371 struct qlcnic_host_tx_ring *tx_ring; 371 struct qlcnic_host_tx_ring *tx_ring;
@@ -379,7 +379,6 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
379 __netif_tx_lock_bh(tx_ring->txq); 379 __netif_tx_lock_bh(tx_ring->txq);
380 380
381 producer = tx_ring->producer; 381 producer = tx_ring->producer;
382 consumer = tx_ring->sw_consumer;
383 382
384 if (nr_desc >= qlcnic_tx_avail(tx_ring)) { 383 if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
385 netif_tx_stop_queue(tx_ring->txq); 384 netif_tx_stop_queue(tx_ring->txq);
@@ -402,7 +401,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
402 pbuf->frag_count = 0; 401 pbuf->frag_count = 0;
403 402
404 memcpy(&tx_ring->desc_head[producer], 403 memcpy(&tx_ring->desc_head[producer],
405 &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); 404 cmd_desc, sizeof(struct cmd_desc_type0));
406 405
407 producer = get_next_index(producer, tx_ring->num_desc); 406 producer = get_next_index(producer, tx_ring->num_desc);
408 i++; 407 i++;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index a7554d9aab0c..d833f5927891 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -445,13 +445,10 @@ static int
445qlcnic_set_function_modes(struct qlcnic_adapter *adapter) 445qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
446{ 446{
447 u8 id; 447 u8 id;
448 u32 ref_count;
449 int i, ret = 1; 448 int i, ret = 1;
450 u32 data = QLCNIC_MGMT_FUNC; 449 u32 data = QLCNIC_MGMT_FUNC;
451 struct qlcnic_hardware_context *ahw = adapter->ahw; 450 struct qlcnic_hardware_context *ahw = adapter->ahw;
452 451
453 /* If other drivers are not in use set their privilege level */
454 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
455 ret = qlcnic_api_lock(adapter); 452 ret = qlcnic_api_lock(adapter);
456 if (ret) 453 if (ret)
457 goto err_lock; 454 goto err_lock;
@@ -531,11 +528,9 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
531{ 528{
532 u32 offset; 529 u32 offset;
533 void __iomem *mem_ptr0 = NULL; 530 void __iomem *mem_ptr0 = NULL;
534 resource_size_t mem_base;
535 unsigned long mem_len, pci_len0 = 0, bar0_len; 531 unsigned long mem_len, pci_len0 = 0, bar0_len;
536 532
537 /* remap phys address */ 533 /* remap phys address */
538 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
539 mem_len = pci_resource_len(pdev, 0); 534 mem_len = pci_resource_len(pdev, 0);
540 535
541 qlcnic_get_bar_length(pdev->device, &bar0_len); 536 qlcnic_get_bar_length(pdev->device, &bar0_len);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 12ff29270745..0b8d8625834c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -197,7 +197,7 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
197 int i, k, timeout = 0; 197 int i, k, timeout = 0;
198 void __iomem *base = adapter->ahw->pci_base0; 198 void __iomem *base = adapter->ahw->pci_base0;
199 u32 addr, data; 199 u32 addr, data;
200 u8 opcode, no_ops; 200 u8 no_ops;
201 struct __ctrl *ctr = &entry->region.ctrl; 201 struct __ctrl *ctr = &entry->region.ctrl;
202 struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr; 202 struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
203 203
@@ -206,7 +206,6 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
206 206
207 for (i = 0; i < no_ops; i++) { 207 for (i = 0; i < no_ops; i++) {
208 k = 0; 208 k = 0;
209 opcode = 0;
210 for (k = 0; k < 8; k++) { 209 for (k = 0; k < 8; k++) {
211 if (!(ctr->opcode & (1 << k))) 210 if (!(ctr->opcode & (1 << k)))
212 continue; 211 continue;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index cb6fc5a743ca..5ac93323a40c 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -577,28 +577,30 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
577{ 577{
578 struct net_device *dev = dev_instance; 578 struct net_device *dev = dev_instance;
579 struct cp_private *cp; 579 struct cp_private *cp;
580 int handled = 0;
580 u16 status; 581 u16 status;
581 582
582 if (unlikely(dev == NULL)) 583 if (unlikely(dev == NULL))
583 return IRQ_NONE; 584 return IRQ_NONE;
584 cp = netdev_priv(dev); 585 cp = netdev_priv(dev);
585 586
587 spin_lock(&cp->lock);
588
586 status = cpr16(IntrStatus); 589 status = cpr16(IntrStatus);
587 if (!status || (status == 0xFFFF)) 590 if (!status || (status == 0xFFFF))
588 return IRQ_NONE; 591 goto out_unlock;
592
593 handled = 1;
589 594
590 netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n", 595 netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
591 status, cpr8(Cmd), cpr16(CpCmd)); 596 status, cpr8(Cmd), cpr16(CpCmd));
592 597
593 cpw16(IntrStatus, status & ~cp_rx_intr_mask); 598 cpw16(IntrStatus, status & ~cp_rx_intr_mask);
594 599
595 spin_lock(&cp->lock);
596
597 /* close possible race's with dev_close */ 600 /* close possible race's with dev_close */
598 if (unlikely(!netif_running(dev))) { 601 if (unlikely(!netif_running(dev))) {
599 cpw16(IntrMask, 0); 602 cpw16(IntrMask, 0);
600 spin_unlock(&cp->lock); 603 goto out_unlock;
601 return IRQ_HANDLED;
602 } 604 }
603 605
604 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) 606 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
@@ -612,7 +614,6 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
612 if (status & LinkChg) 614 if (status & LinkChg)
613 mii_check_media(&cp->mii_if, netif_msg_link(cp), false); 615 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
614 616
615 spin_unlock(&cp->lock);
616 617
617 if (status & PciErr) { 618 if (status & PciErr) {
618 u16 pci_status; 619 u16 pci_status;
@@ -625,7 +626,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
625 /* TODO: reset hardware */ 626 /* TODO: reset hardware */
626 } 627 }
627 628
628 return IRQ_HANDLED; 629out_unlock:
630 spin_unlock(&cp->lock);
631
632 return IRQ_RETVAL(handled);
629} 633}
630 634
631#ifdef CONFIG_NET_POLL_CONTROLLER 635#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 022b45bc14ff..a670d23d9340 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2386,8 +2386,6 @@ static const struct of_device_id smc91x_match[] = {
2386 {}, 2386 {},
2387}; 2387};
2388MODULE_DEVICE_TABLE(of, smc91x_match); 2388MODULE_DEVICE_TABLE(of, smc91x_match);
2389#else
2390#define smc91x_match NULL
2391#endif 2389#endif
2392 2390
2393static struct dev_pm_ops smc_drv_pm_ops = { 2391static struct dev_pm_ops smc_drv_pm_ops = {
@@ -2402,7 +2400,7 @@ static struct platform_driver smc_driver = {
2402 .name = CARDNAME, 2400 .name = CARDNAME,
2403 .owner = THIS_MODULE, 2401 .owner = THIS_MODULE,
2404 .pm = &smc_drv_pm_ops, 2402 .pm = &smc_drv_pm_ops,
2405 .of_match_table = smc91x_match, 2403 .of_match_table = of_match_ptr(smc91x_match),
2406 }, 2404 },
2407}; 2405};
2408 2406
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 4616bf27d515..e112877d15d3 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2575,11 +2575,13 @@ static const struct dev_pm_ops smsc911x_pm_ops = {
2575#define SMSC911X_PM_OPS NULL 2575#define SMSC911X_PM_OPS NULL
2576#endif 2576#endif
2577 2577
2578#ifdef CONFIG_OF
2578static const struct of_device_id smsc911x_dt_ids[] = { 2579static const struct of_device_id smsc911x_dt_ids[] = {
2579 { .compatible = "smsc,lan9115", }, 2580 { .compatible = "smsc,lan9115", },
2580 { /* sentinel */ } 2581 { /* sentinel */ }
2581}; 2582};
2582MODULE_DEVICE_TABLE(of, smsc911x_dt_ids); 2583MODULE_DEVICE_TABLE(of, smsc911x_dt_ids);
2584#endif
2583 2585
2584static struct platform_driver smsc911x_driver = { 2586static struct platform_driver smsc911x_driver = {
2585 .probe = smsc911x_drv_probe, 2587 .probe = smsc911x_drv_probe,
@@ -2588,7 +2590,7 @@ static struct platform_driver smsc911x_driver = {
2588 .name = SMSC_CHIPNAME, 2590 .name = SMSC_CHIPNAME,
2589 .owner = THIS_MODULE, 2591 .owner = THIS_MODULE,
2590 .pm = SMSC911X_PM_OPS, 2592 .pm = SMSC911X_PM_OPS,
2591 .of_match_table = smsc911x_dt_ids, 2593 .of_match_table = of_match_ptr(smsc911x_dt_ids),
2592 }, 2594 },
2593}; 2595};
2594 2596
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 023a4fb4efa5..b05df8983be5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -127,14 +127,14 @@ static inline int stmmac_register_platform(void)
127} 127}
128static inline void stmmac_unregister_platform(void) 128static inline void stmmac_unregister_platform(void)
129{ 129{
130 platform_driver_register(&stmmac_pltfr_driver); 130 platform_driver_unregister(&stmmac_pltfr_driver);
131} 131}
132#else 132#else
133static inline int stmmac_register_platform(void) 133static inline int stmmac_register_platform(void)
134{ 134{
135 pr_debug("stmmac: do not register the platf driver\n"); 135 pr_debug("stmmac: do not register the platf driver\n");
136 136
137 return -EINVAL; 137 return 0;
138} 138}
139static inline void stmmac_unregister_platform(void) 139static inline void stmmac_unregister_platform(void)
140{ 140{
@@ -162,7 +162,7 @@ static inline int stmmac_register_pci(void)
162{ 162{
163 pr_debug("stmmac: do not register the PCI driver\n"); 163 pr_debug("stmmac: do not register the PCI driver\n");
164 164
165 return -EINVAL; 165 return 0;
166} 166}
167static inline void stmmac_unregister_pci(void) 167static inline void stmmac_unregister_pci(void)
168{ 168{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 542edbcd92c7..f07c0612abf6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2194,18 +2194,20 @@ int stmmac_restore(struct net_device *ndev)
2194 */ 2194 */
2195static int __init stmmac_init(void) 2195static int __init stmmac_init(void)
2196{ 2196{
2197 int err_plt = 0; 2197 int ret;
2198 int err_pci = 0;
2199
2200 err_plt = stmmac_register_platform();
2201 err_pci = stmmac_register_pci();
2202
2203 if ((err_pci) && (err_plt)) {
2204 pr_err("stmmac: driver registration failed\n");
2205 return -EINVAL;
2206 }
2207 2198
2199 ret = stmmac_register_platform();
2200 if (ret)
2201 goto err;
2202 ret = stmmac_register_pci();
2203 if (ret)
2204 goto err_pci;
2208 return 0; 2205 return 0;
2206err_pci:
2207 stmmac_unregister_platform();
2208err:
2209 pr_err("stmmac: driver registration failed\n");
2210 return ret;
2209} 2211}
2210 2212
2211static void __exit stmmac_exit(void) 2213static void __exit stmmac_exit(void)
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 337766738eca..5e62c1aeeffb 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -27,8 +27,6 @@
27#include <linux/uaccess.h> 27#include <linux/uaccess.h>
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29 29
30#include <plat/clock.h>
31
32#include "cpts.h" 30#include "cpts.h"
33 31
34#ifdef CONFIG_TI_CPTS 32#ifdef CONFIG_TI_CPTS
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 40b426edc9e6..504f7f1cad94 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -138,6 +138,8 @@ struct tun_file {
138 /* only used for fasnyc */ 138 /* only used for fasnyc */
139 unsigned int flags; 139 unsigned int flags;
140 u16 queue_index; 140 u16 queue_index;
141 struct list_head next;
142 struct tun_struct *detached;
141}; 143};
142 144
143struct tun_flow_entry { 145struct tun_flow_entry {
@@ -182,6 +184,8 @@ struct tun_struct {
182 struct hlist_head flows[TUN_NUM_FLOW_ENTRIES]; 184 struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
183 struct timer_list flow_gc_timer; 185 struct timer_list flow_gc_timer;
184 unsigned long ageing_time; 186 unsigned long ageing_time;
187 unsigned int numdisabled;
188 struct list_head disabled;
185}; 189};
186 190
187static inline u32 tun_hashfn(u32 rxhash) 191static inline u32 tun_hashfn(u32 rxhash)
@@ -385,6 +389,23 @@ static void tun_set_real_num_queues(struct tun_struct *tun)
385 netif_set_real_num_rx_queues(tun->dev, tun->numqueues); 389 netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
386} 390}
387 391
392static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
393{
394 tfile->detached = tun;
395 list_add_tail(&tfile->next, &tun->disabled);
396 ++tun->numdisabled;
397}
398
399static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
400{
401 struct tun_struct *tun = tfile->detached;
402
403 tfile->detached = NULL;
404 list_del_init(&tfile->next);
405 --tun->numdisabled;
406 return tun;
407}
408
388static void __tun_detach(struct tun_file *tfile, bool clean) 409static void __tun_detach(struct tun_file *tfile, bool clean)
389{ 410{
390 struct tun_file *ntfile; 411 struct tun_file *ntfile;
@@ -406,20 +427,25 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
406 ntfile->queue_index = index; 427 ntfile->queue_index = index;
407 428
408 --tun->numqueues; 429 --tun->numqueues;
409 sock_put(&tfile->sk); 430 if (clean)
431 sock_put(&tfile->sk);
432 else
433 tun_disable_queue(tun, tfile);
410 434
411 synchronize_net(); 435 synchronize_net();
412 tun_flow_delete_by_queue(tun, tun->numqueues + 1); 436 tun_flow_delete_by_queue(tun, tun->numqueues + 1);
413 /* Drop read queue */ 437 /* Drop read queue */
414 skb_queue_purge(&tfile->sk.sk_receive_queue); 438 skb_queue_purge(&tfile->sk.sk_receive_queue);
415 tun_set_real_num_queues(tun); 439 tun_set_real_num_queues(tun);
416 440 } else if (tfile->detached && clean)
417 if (tun->numqueues == 0 && !(tun->flags & TUN_PERSIST)) 441 tun = tun_enable_queue(tfile);
418 if (dev->reg_state == NETREG_REGISTERED)
419 unregister_netdevice(dev);
420 }
421 442
422 if (clean) { 443 if (clean) {
444 if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
445 !(tun->flags & TUN_PERSIST))
446 if (tun->dev->reg_state == NETREG_REGISTERED)
447 unregister_netdevice(tun->dev);
448
423 BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, 449 BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
424 &tfile->socket.flags)); 450 &tfile->socket.flags));
425 sk_release_kernel(&tfile->sk); 451 sk_release_kernel(&tfile->sk);
@@ -436,7 +462,7 @@ static void tun_detach(struct tun_file *tfile, bool clean)
436static void tun_detach_all(struct net_device *dev) 462static void tun_detach_all(struct net_device *dev)
437{ 463{
438 struct tun_struct *tun = netdev_priv(dev); 464 struct tun_struct *tun = netdev_priv(dev);
439 struct tun_file *tfile; 465 struct tun_file *tfile, *tmp;
440 int i, n = tun->numqueues; 466 int i, n = tun->numqueues;
441 467
442 for (i = 0; i < n; i++) { 468 for (i = 0; i < n; i++) {
@@ -457,6 +483,12 @@ static void tun_detach_all(struct net_device *dev)
457 skb_queue_purge(&tfile->sk.sk_receive_queue); 483 skb_queue_purge(&tfile->sk.sk_receive_queue);
458 sock_put(&tfile->sk); 484 sock_put(&tfile->sk);
459 } 485 }
486 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
487 tun_enable_queue(tfile);
488 skb_queue_purge(&tfile->sk.sk_receive_queue);
489 sock_put(&tfile->sk);
490 }
491 BUG_ON(tun->numdisabled != 0);
460} 492}
461 493
462static int tun_attach(struct tun_struct *tun, struct file *file) 494static int tun_attach(struct tun_struct *tun, struct file *file)
@@ -473,7 +505,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
473 goto out; 505 goto out;
474 506
475 err = -E2BIG; 507 err = -E2BIG;
476 if (tun->numqueues == MAX_TAP_QUEUES) 508 if (!tfile->detached &&
509 tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
477 goto out; 510 goto out;
478 511
479 err = 0; 512 err = 0;
@@ -487,9 +520,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
487 tfile->queue_index = tun->numqueues; 520 tfile->queue_index = tun->numqueues;
488 rcu_assign_pointer(tfile->tun, tun); 521 rcu_assign_pointer(tfile->tun, tun);
489 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); 522 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
490 sock_hold(&tfile->sk);
491 tun->numqueues++; 523 tun->numqueues++;
492 524
525 if (tfile->detached)
526 tun_enable_queue(tfile);
527 else
528 sock_hold(&tfile->sk);
529
493 tun_set_real_num_queues(tun); 530 tun_set_real_num_queues(tun);
494 531
495 /* device is allowed to go away first, so no need to hold extra 532 /* device is allowed to go away first, so no need to hold extra
@@ -1162,6 +1199,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1162 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1199 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1163 } 1200 }
1164 1201
1202 skb_reset_network_header(skb);
1165 rxhash = skb_get_rxhash(skb); 1203 rxhash = skb_get_rxhash(skb);
1166 netif_rx_ni(skb); 1204 netif_rx_ni(skb);
1167 1205
@@ -1349,6 +1387,7 @@ static void tun_free_netdev(struct net_device *dev)
1349{ 1387{
1350 struct tun_struct *tun = netdev_priv(dev); 1388 struct tun_struct *tun = netdev_priv(dev);
1351 1389
1390 BUG_ON(!(list_empty(&tun->disabled)));
1352 tun_flow_uninit(tun); 1391 tun_flow_uninit(tun);
1353 free_netdev(dev); 1392 free_netdev(dev);
1354} 1393}
@@ -1543,6 +1582,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1543 err = tun_attach(tun, file); 1582 err = tun_attach(tun, file);
1544 if (err < 0) 1583 if (err < 0)
1545 return err; 1584 return err;
1585
1586 if (tun->flags & TUN_TAP_MQ &&
1587 (tun->numqueues + tun->numdisabled > 1))
1588 return err;
1546 } 1589 }
1547 else { 1590 else {
1548 char *name; 1591 char *name;
@@ -1601,6 +1644,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1601 TUN_USER_FEATURES; 1644 TUN_USER_FEATURES;
1602 dev->features = dev->hw_features; 1645 dev->features = dev->hw_features;
1603 1646
1647 INIT_LIST_HEAD(&tun->disabled);
1604 err = tun_attach(tun, file); 1648 err = tun_attach(tun, file);
1605 if (err < 0) 1649 if (err < 0)
1606 goto err_free_dev; 1650 goto err_free_dev;
@@ -1755,32 +1799,28 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
1755{ 1799{
1756 struct tun_file *tfile = file->private_data; 1800 struct tun_file *tfile = file->private_data;
1757 struct tun_struct *tun; 1801 struct tun_struct *tun;
1758 struct net_device *dev;
1759 int ret = 0; 1802 int ret = 0;
1760 1803
1761 rtnl_lock(); 1804 rtnl_lock();
1762 1805
1763 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 1806 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
1764 dev = __dev_get_by_name(tfile->net, ifr->ifr_name); 1807 tun = tfile->detached;
1765 if (!dev) { 1808 if (!tun)
1766 ret = -EINVAL;
1767 goto unlock;
1768 }
1769
1770 tun = netdev_priv(dev);
1771 if (dev->netdev_ops != &tap_netdev_ops &&
1772 dev->netdev_ops != &tun_netdev_ops)
1773 ret = -EINVAL; 1809 ret = -EINVAL;
1774 else if (tun_not_capable(tun)) 1810 else if (tun_not_capable(tun))
1775 ret = -EPERM; 1811 ret = -EPERM;
1776 else 1812 else
1777 ret = tun_attach(tun, file); 1813 ret = tun_attach(tun, file);
1778 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) 1814 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1779 __tun_detach(tfile, false); 1815 tun = rcu_dereference_protected(tfile->tun,
1780 else 1816 lockdep_rtnl_is_held());
1817 if (!tun || !(tun->flags & TUN_TAP_MQ))
1818 ret = -EINVAL;
1819 else
1820 __tun_detach(tfile, false);
1821 } else
1781 ret = -EINVAL; 1822 ret = -EINVAL;
1782 1823
1783unlock:
1784 rtnl_unlock(); 1824 rtnl_unlock();
1785 return ret; 1825 return ret;
1786} 1826}
@@ -2092,6 +2132,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
2092 2132
2093 file->private_data = tfile; 2133 file->private_data = tfile;
2094 set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags); 2134 set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
2135 INIT_LIST_HEAD(&tfile->next);
2095 2136
2096 return 0; 2137 return 0;
2097} 2138}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index d0129827602b..3f3d12d766e7 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -457,12 +457,6 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
457} 457}
458EXPORT_SYMBOL_GPL(usbnet_cdc_bind); 458EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
459 459
460static int cdc_manage_power(struct usbnet *dev, int on)
461{
462 dev->intf->needs_remote_wakeup = on;
463 return 0;
464}
465
466static const struct driver_info cdc_info = { 460static const struct driver_info cdc_info = {
467 .description = "CDC Ethernet Device", 461 .description = "CDC Ethernet Device",
468 .flags = FLAG_ETHER | FLAG_POINTTOPOINT, 462 .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
@@ -470,7 +464,7 @@ static const struct driver_info cdc_info = {
470 .bind = usbnet_cdc_bind, 464 .bind = usbnet_cdc_bind,
471 .unbind = usbnet_cdc_unbind, 465 .unbind = usbnet_cdc_unbind,
472 .status = usbnet_cdc_status, 466 .status = usbnet_cdc_status,
473 .manage_power = cdc_manage_power, 467 .manage_power = usbnet_manage_power,
474}; 468};
475 469
476static const struct driver_info wwan_info = { 470static const struct driver_info wwan_info = {
@@ -479,7 +473,7 @@ static const struct driver_info wwan_info = {
479 .bind = usbnet_cdc_bind, 473 .bind = usbnet_cdc_bind,
480 .unbind = usbnet_cdc_unbind, 474 .unbind = usbnet_cdc_unbind,
481 .status = usbnet_cdc_status, 475 .status = usbnet_cdc_status,
482 .manage_power = cdc_manage_power, 476 .manage_power = usbnet_manage_power,
483}; 477};
484 478
485/*-------------------------------------------------------------------------*/ 479/*-------------------------------------------------------------------------*/
@@ -487,6 +481,7 @@ static const struct driver_info wwan_info = {
487#define HUAWEI_VENDOR_ID 0x12D1 481#define HUAWEI_VENDOR_ID 0x12D1
488#define NOVATEL_VENDOR_ID 0x1410 482#define NOVATEL_VENDOR_ID 0x1410
489#define ZTE_VENDOR_ID 0x19D2 483#define ZTE_VENDOR_ID 0x19D2
484#define DELL_VENDOR_ID 0x413C
490 485
491static const struct usb_device_id products [] = { 486static const struct usb_device_id products [] = {
492/* 487/*
@@ -594,27 +589,29 @@ static const struct usb_device_id products [] = {
594 589
595/* Novatel USB551L and MC551 - handled by qmi_wwan */ 590/* Novatel USB551L and MC551 - handled by qmi_wwan */
596{ 591{
597 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 592 USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0xB001, USB_CLASS_COMM,
598 | USB_DEVICE_ID_MATCH_PRODUCT 593 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
599 | USB_DEVICE_ID_MATCH_INT_INFO,
600 .idVendor = NOVATEL_VENDOR_ID,
601 .idProduct = 0xB001,
602 .bInterfaceClass = USB_CLASS_COMM,
603 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
604 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
605 .driver_info = 0, 594 .driver_info = 0,
606}, 595},
607 596
608/* Novatel E362 - handled by qmi_wwan */ 597/* Novatel E362 - handled by qmi_wwan */
609{ 598{
610 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 599 USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0x9010, USB_CLASS_COMM,
611 | USB_DEVICE_ID_MATCH_PRODUCT 600 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
612 | USB_DEVICE_ID_MATCH_INT_INFO, 601 .driver_info = 0,
613 .idVendor = NOVATEL_VENDOR_ID, 602},
614 .idProduct = 0x9010, 603
615 .bInterfaceClass = USB_CLASS_COMM, 604/* Dell Wireless 5800 (Novatel E362) - handled by qmi_wwan */
616 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, 605{
617 .bInterfaceProtocol = USB_CDC_PROTO_NONE, 606 USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x8195, USB_CLASS_COMM,
607 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
608 .driver_info = 0,
609},
610
611/* Dell Wireless 5800 (Novatel E362) - handled by qmi_wwan */
612{
613 USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x8196, USB_CLASS_COMM,
614 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
618 .driver_info = 0, 615 .driver_info = 0,
619}, 616},
620 617
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d38bc20a60e2..71b6e92b8e9b 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1129,19 +1129,13 @@ static void cdc_ncm_disconnect(struct usb_interface *intf)
1129 usbnet_disconnect(intf); 1129 usbnet_disconnect(intf);
1130} 1130}
1131 1131
1132static int cdc_ncm_manage_power(struct usbnet *dev, int status)
1133{
1134 dev->intf->needs_remote_wakeup = status;
1135 return 0;
1136}
1137
1138static const struct driver_info cdc_ncm_info = { 1132static const struct driver_info cdc_ncm_info = {
1139 .description = "CDC NCM", 1133 .description = "CDC NCM",
1140 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET, 1134 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
1141 .bind = cdc_ncm_bind, 1135 .bind = cdc_ncm_bind,
1142 .unbind = cdc_ncm_unbind, 1136 .unbind = cdc_ncm_unbind,
1143 .check_connect = cdc_ncm_check_connect, 1137 .check_connect = cdc_ncm_check_connect,
1144 .manage_power = cdc_ncm_manage_power, 1138 .manage_power = usbnet_manage_power,
1145 .status = cdc_ncm_status, 1139 .status = cdc_ncm_status,
1146 .rx_fixup = cdc_ncm_rx_fixup, 1140 .rx_fixup = cdc_ncm_rx_fixup,
1147 .tx_fixup = cdc_ncm_tx_fixup, 1141 .tx_fixup = cdc_ncm_tx_fixup,
@@ -1155,7 +1149,7 @@ static const struct driver_info wwan_info = {
1155 .bind = cdc_ncm_bind, 1149 .bind = cdc_ncm_bind,
1156 .unbind = cdc_ncm_unbind, 1150 .unbind = cdc_ncm_unbind,
1157 .check_connect = cdc_ncm_check_connect, 1151 .check_connect = cdc_ncm_check_connect,
1158 .manage_power = cdc_ncm_manage_power, 1152 .manage_power = usbnet_manage_power,
1159 .status = cdc_ncm_status, 1153 .status = cdc_ncm_status,
1160 .rx_fixup = cdc_ncm_rx_fixup, 1154 .rx_fixup = cdc_ncm_rx_fixup,
1161 .tx_fixup = cdc_ncm_tx_fixup, 1155 .tx_fixup = cdc_ncm_tx_fixup,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 1ea91f4237f0..91d7cb9728eb 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -383,6 +383,20 @@ static const struct usb_device_id products[] = {
383 USB_CDC_PROTO_NONE), 383 USB_CDC_PROTO_NONE),
384 .driver_info = (unsigned long)&qmi_wwan_info, 384 .driver_info = (unsigned long)&qmi_wwan_info,
385 }, 385 },
386 { /* Dell Wireless 5800 (Novatel E362) */
387 USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8195,
388 USB_CLASS_COMM,
389 USB_CDC_SUBCLASS_ETHERNET,
390 USB_CDC_PROTO_NONE),
391 .driver_info = (unsigned long)&qmi_wwan_info,
392 },
393 { /* Dell Wireless 5800 V2 (Novatel E362) */
394 USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8196,
395 USB_CLASS_COMM,
396 USB_CDC_SUBCLASS_ETHERNET,
397 USB_CDC_PROTO_NONE),
398 .driver_info = (unsigned long)&qmi_wwan_info,
399 },
386 400
387 /* 3. Combined interface devices matching on interface number */ 401 /* 3. Combined interface devices matching on interface number */
388 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 402 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
@@ -419,6 +433,7 @@ static const struct usb_device_id products[] = {
419 {QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */ 433 {QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */
420 {QMI_FIXED_INTF(0x19d2, 0x0200, 1)}, 434 {QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
421 {QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */ 435 {QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */
436 {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
422 {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ 437 {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
423 {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ 438 {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
424 {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */ 439 {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index c04110ba677f..3d4bf01641b4 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -719,7 +719,8 @@ int usbnet_stop (struct net_device *net)
719 dev->flags = 0; 719 dev->flags = 0;
720 del_timer_sync (&dev->delay); 720 del_timer_sync (&dev->delay);
721 tasklet_kill (&dev->bh); 721 tasklet_kill (&dev->bh);
722 if (info->manage_power) 722 if (info->manage_power &&
723 !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
723 info->manage_power(dev, 0); 724 info->manage_power(dev, 0);
724 else 725 else
725 usb_autopm_put_interface(dev->intf); 726 usb_autopm_put_interface(dev->intf);
@@ -794,14 +795,14 @@ int usbnet_open (struct net_device *net)
794 tasklet_schedule (&dev->bh); 795 tasklet_schedule (&dev->bh);
795 if (info->manage_power) { 796 if (info->manage_power) {
796 retval = info->manage_power(dev, 1); 797 retval = info->manage_power(dev, 1);
797 if (retval < 0) 798 if (retval < 0) {
798 goto done_manage_power_error; 799 retval = 0;
799 usb_autopm_put_interface(dev->intf); 800 set_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
801 } else {
802 usb_autopm_put_interface(dev->intf);
803 }
800 } 804 }
801 return retval; 805 return retval;
802
803done_manage_power_error:
804 clear_bit(EVENT_DEV_OPEN, &dev->flags);
805done: 806done:
806 usb_autopm_put_interface(dev->intf); 807 usb_autopm_put_interface(dev->intf);
807done_nopm: 808done_nopm:
@@ -1615,6 +1616,16 @@ void usbnet_device_suggests_idle(struct usbnet *dev)
1615} 1616}
1616EXPORT_SYMBOL(usbnet_device_suggests_idle); 1617EXPORT_SYMBOL(usbnet_device_suggests_idle);
1617 1618
1619/*
1620 * For devices that can do without special commands
1621 */
1622int usbnet_manage_power(struct usbnet *dev, int on)
1623{
1624 dev->intf->needs_remote_wakeup = on;
1625 return 0;
1626}
1627EXPORT_SYMBOL(usbnet_manage_power);
1628
1618/*-------------------------------------------------------------------------*/ 1629/*-------------------------------------------------------------------------*/
1619static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, 1630static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1620 u16 value, u16 index, void *data, u16 size) 1631 u16 value, u16 index, void *data, u16 size)
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 6650fde99e1d..9f1e947f3557 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -152,6 +152,9 @@ enum {
152 /* Device IDs */ 152 /* Device IDs */
153 USB_DEVICE_ID_I6050 = 0x0186, 153 USB_DEVICE_ID_I6050 = 0x0186,
154 USB_DEVICE_ID_I6050_2 = 0x0188, 154 USB_DEVICE_ID_I6050_2 = 0x0188,
155 USB_DEVICE_ID_I6150 = 0x07d6,
156 USB_DEVICE_ID_I6150_2 = 0x07d7,
157 USB_DEVICE_ID_I6150_3 = 0x07d9,
155 USB_DEVICE_ID_I6250 = 0x0187, 158 USB_DEVICE_ID_I6250 = 0x0187,
156}; 159};
157 160
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 713d033891e6..080f36303a4f 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -510,6 +510,9 @@ int i2400mu_probe(struct usb_interface *iface,
510 switch (id->idProduct) { 510 switch (id->idProduct) {
511 case USB_DEVICE_ID_I6050: 511 case USB_DEVICE_ID_I6050:
512 case USB_DEVICE_ID_I6050_2: 512 case USB_DEVICE_ID_I6050_2:
513 case USB_DEVICE_ID_I6150:
514 case USB_DEVICE_ID_I6150_2:
515 case USB_DEVICE_ID_I6150_3:
513 case USB_DEVICE_ID_I6250: 516 case USB_DEVICE_ID_I6250:
514 i2400mu->i6050 = 1; 517 i2400mu->i6050 = 1;
515 break; 518 break;
@@ -759,6 +762,9 @@ static
759struct usb_device_id i2400mu_id_table[] = { 762struct usb_device_id i2400mu_id_table[] = {
760 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, 763 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
761 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) }, 764 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
765 { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) },
766 { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) },
767 { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) },
762 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) }, 768 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
763 { USB_DEVICE(0x8086, 0x0181) }, 769 { USB_DEVICE(0x8086, 0x0181) },
764 { USB_DEVICE(0x8086, 0x1403) }, 770 { USB_DEVICE(0x8086, 0x1403) },
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 062dfdff6364..67156efe14c4 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -47,7 +47,7 @@ obj-$(CONFIG_RT2X00) += rt2x00/
47 47
48obj-$(CONFIG_P54_COMMON) += p54/ 48obj-$(CONFIG_P54_COMMON) += p54/
49 49
50obj-$(CONFIG_ATH_COMMON) += ath/ 50obj-$(CONFIG_ATH_CARDS) += ath/
51 51
52obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o 52obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
53 53
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 4ffb6a584cd0..44f8b3f3cbed 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -685,6 +685,14 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
685 * to mac80211. 685 * to mac80211.
686 */ 686 */
687 rx_status = IEEE80211_SKB_RXCB(entry->skb); 687 rx_status = IEEE80211_SKB_RXCB(entry->skb);
688
689 /* Ensure that all fields of rx_status are initialized
690 * properly. The skb->cb array was used for driver
691 * specific informations, so rx_status might contain
692 * garbage.
693 */
694 memset(rx_status, 0, sizeof(*rx_status));
695
688 rx_status->mactime = rxdesc.timestamp; 696 rx_status->mactime = rxdesc.timestamp;
689 rx_status->band = rt2x00dev->curr_band; 697 rx_status->band = rt2x00dev->curr_band;
690 rx_status->freq = rt2x00dev->curr_freq; 698 rx_status->freq = rt2x00dev->curr_freq;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index db8d211a0d05..2390ddb22d60 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -629,7 +629,7 @@ struct device_node *of_find_matching_node_and_match(struct device_node *from,
629 read_unlock(&devtree_lock); 629 read_unlock(&devtree_lock);
630 return np; 630 return np;
631} 631}
632EXPORT_SYMBOL(of_find_matching_node); 632EXPORT_SYMBOL(of_find_matching_node_and_match);
633 633
634/** 634/**
635 * of_modalias_node - Lookup appropriate modalias for a device node 635 * of_modalias_node - Lookup appropriate modalias for a device node
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index adb3a4b59cb3..6ba047f5ac2c 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -239,44 +239,37 @@ static bool is_full_charged(struct charger_manager *cm)
239 int uV; 239 int uV;
240 240
241 /* If there is no battery, it cannot be charged */ 241 /* If there is no battery, it cannot be charged */
242 if (!is_batt_present(cm)) { 242 if (!is_batt_present(cm))
243 val.intval = 0; 243 return false;
244 goto out;
245 }
246 244
247 if (cm->fuel_gauge && desc->fullbatt_full_capacity > 0) { 245 if (cm->fuel_gauge && desc->fullbatt_full_capacity > 0) {
246 val.intval = 0;
247
248 /* Not full if capacity of fuel gauge isn't full */ 248 /* Not full if capacity of fuel gauge isn't full */
249 ret = cm->fuel_gauge->get_property(cm->fuel_gauge, 249 ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
250 POWER_SUPPLY_PROP_CHARGE_FULL, &val); 250 POWER_SUPPLY_PROP_CHARGE_FULL, &val);
251 if (!ret && val.intval > desc->fullbatt_full_capacity) { 251 if (!ret && val.intval > desc->fullbatt_full_capacity)
252 val.intval = 1; 252 return true;
253 goto out;
254 }
255 } 253 }
256 254
257 /* Full, if it's over the fullbatt voltage */ 255 /* Full, if it's over the fullbatt voltage */
258 if (desc->fullbatt_uV > 0) { 256 if (desc->fullbatt_uV > 0) {
259 ret = get_batt_uV(cm, &uV); 257 ret = get_batt_uV(cm, &uV);
260 if (!ret && uV >= desc->fullbatt_uV) { 258 if (!ret && uV >= desc->fullbatt_uV)
261 val.intval = 1; 259 return true;
262 goto out;
263 }
264 } 260 }
265 261
266 /* Full, if the capacity is more than fullbatt_soc */ 262 /* Full, if the capacity is more than fullbatt_soc */
267 if (cm->fuel_gauge && desc->fullbatt_soc > 0) { 263 if (cm->fuel_gauge && desc->fullbatt_soc > 0) {
264 val.intval = 0;
265
268 ret = cm->fuel_gauge->get_property(cm->fuel_gauge, 266 ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
269 POWER_SUPPLY_PROP_CAPACITY, &val); 267 POWER_SUPPLY_PROP_CAPACITY, &val);
270 if (!ret && val.intval >= desc->fullbatt_soc) { 268 if (!ret && val.intval >= desc->fullbatt_soc)
271 val.intval = 1; 269 return true;
272 goto out;
273 }
274 } 270 }
275 271
276 val.intval = 0; 272 return false;
277
278out:
279 return val.intval ? true : false;
280} 273}
281 274
282/** 275/**
@@ -489,8 +482,9 @@ static void fullbatt_vchk(struct work_struct *work)
489 return; 482 return;
490 } 483 }
491 484
492 diff = desc->fullbatt_uV; 485 diff = desc->fullbatt_uV - batt_uV;
493 diff -= batt_uV; 486 if (diff < 0)
487 return;
494 488
495 dev_info(cm->dev, "VBATT dropped %duV after full-batt.\n", diff); 489 dev_info(cm->dev, "VBATT dropped %duV after full-batt.\n", diff);
496 490
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index ed81720e7b2b..e513cd998170 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -112,6 +112,17 @@ config PWM_SAMSUNG
112 To compile this driver as a module, choose M here: the module 112 To compile this driver as a module, choose M here: the module
113 will be called pwm-samsung. 113 will be called pwm-samsung.
114 114
115config PWM_SPEAR
116 tristate "STMicroelectronics SPEAr PWM support"
117 depends on PLAT_SPEAR
118 depends on OF
119 help
120 Generic PWM framework driver for the PWM controller on ST
121 SPEAr SoCs.
122
123 To compile this driver as a module, choose M here: the module
124 will be called pwm-spear.
125
115config PWM_TEGRA 126config PWM_TEGRA
116 tristate "NVIDIA Tegra PWM support" 127 tristate "NVIDIA Tegra PWM support"
117 depends on ARCH_TEGRA 128 depends on ARCH_TEGRA
@@ -125,6 +136,7 @@ config PWM_TEGRA
125config PWM_TIECAP 136config PWM_TIECAP
126 tristate "ECAP PWM support" 137 tristate "ECAP PWM support"
127 depends on SOC_AM33XX 138 depends on SOC_AM33XX
139 select PWM_TIPWMSS
128 help 140 help
129 PWM driver support for the ECAP APWM controller found on AM33XX 141 PWM driver support for the ECAP APWM controller found on AM33XX
130 TI SOC 142 TI SOC
@@ -135,6 +147,7 @@ config PWM_TIECAP
135config PWM_TIEHRPWM 147config PWM_TIEHRPWM
136 tristate "EHRPWM PWM support" 148 tristate "EHRPWM PWM support"
137 depends on SOC_AM33XX 149 depends on SOC_AM33XX
150 select PWM_TIPWMSS
138 help 151 help
139 PWM driver support for the EHRPWM controller found on AM33XX 152 PWM driver support for the EHRPWM controller found on AM33XX
140 TI SOC 153 TI SOC
@@ -142,14 +155,32 @@ config PWM_TIEHRPWM
142 To compile this driver as a module, choose M here: the module 155 To compile this driver as a module, choose M here: the module
143 will be called pwm-tiehrpwm. 156 will be called pwm-tiehrpwm.
144 157
145config PWM_TWL6030 158config PWM_TIPWMSS
146 tristate "TWL6030 PWM support" 159 bool
160 depends on SOC_AM33XX && (PWM_TIEHRPWM || PWM_TIECAP)
161 help
162 PWM Subsystem driver support for AM33xx SOC.
163
164 PWM submodules require PWM config space access from submodule
165 drivers and require common parent driver support.
166
167config PWM_TWL
168 tristate "TWL4030/6030 PWM support"
169 depends on TWL4030_CORE
170 help
171 Generic PWM framework driver for TWL4030/6030.
172
173 To compile this driver as a module, choose M here: the module
174 will be called pwm-twl.
175
176config PWM_TWL_LED
177 tristate "TWL4030/6030 PWM support for LED drivers"
147 depends on TWL4030_CORE 178 depends on TWL4030_CORE
148 help 179 help
149 Generic PWM framework driver for TWL6030. 180 Generic PWM framework driver for TWL4030/6030 LED terminals.
150 181
151 To compile this driver as a module, choose M here: the module 182 To compile this driver as a module, choose M here: the module
152 will be called pwm-twl6030. 183 will be called pwm-twl-led.
153 184
154config PWM_VT8500 185config PWM_VT8500
155 tristate "vt8500 pwm support" 186 tristate "vt8500 pwm support"
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index acfe4821c58b..62a2963cfe58 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -8,8 +8,11 @@ obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
8obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o 8obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o
9obj-$(CONFIG_PWM_PXA) += pwm-pxa.o 9obj-$(CONFIG_PWM_PXA) += pwm-pxa.o
10obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o 10obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
11obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o
11obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o 12obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o
12obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o 13obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
13obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o 14obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o
14obj-$(CONFIG_PWM_TWL6030) += pwm-twl6030.o 15obj-$(CONFIG_PWM_TIPWMSS) += pwm-tipwmss.o
16obj-$(CONFIG_PWM_TWL) += pwm-twl.o
17obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o
15obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o 18obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index f5acdaa52707..903138b18842 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -32,6 +32,9 @@
32 32
33#define MAX_PWMS 1024 33#define MAX_PWMS 1024
34 34
35/* flags in the third cell of the DT PWM specifier */
36#define PWM_SPEC_POLARITY (1 << 0)
37
35static DEFINE_MUTEX(pwm_lookup_lock); 38static DEFINE_MUTEX(pwm_lookup_lock);
36static LIST_HEAD(pwm_lookup_list); 39static LIST_HEAD(pwm_lookup_list);
37static DEFINE_MUTEX(pwm_lock); 40static DEFINE_MUTEX(pwm_lock);
@@ -129,6 +132,32 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
129 return 0; 132 return 0;
130} 133}
131 134
135struct pwm_device *
136of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args)
137{
138 struct pwm_device *pwm;
139
140 if (pc->of_pwm_n_cells < 3)
141 return ERR_PTR(-EINVAL);
142
143 if (args->args[0] >= pc->npwm)
144 return ERR_PTR(-EINVAL);
145
146 pwm = pwm_request_from_chip(pc, args->args[0], NULL);
147 if (IS_ERR(pwm))
148 return pwm;
149
150 pwm_set_period(pwm, args->args[1]);
151
152 if (args->args[2] & PWM_SPEC_POLARITY)
153 pwm_set_polarity(pwm, PWM_POLARITY_INVERSED);
154 else
155 pwm_set_polarity(pwm, PWM_POLARITY_NORMAL);
156
157 return pwm;
158}
159EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags);
160
132static struct pwm_device * 161static struct pwm_device *
133of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args) 162of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
134{ 163{
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index 8f26e9fcea97..65a86bdeabed 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -235,7 +235,7 @@ static int imx_pwm_probe(struct platform_device *pdev)
235{ 235{
236 const struct of_device_id *of_id = 236 const struct of_device_id *of_id =
237 of_match_device(imx_pwm_dt_ids, &pdev->dev); 237 of_match_device(imx_pwm_dt_ids, &pdev->dev);
238 struct imx_pwm_data *data; 238 const struct imx_pwm_data *data;
239 struct imx_chip *imx; 239 struct imx_chip *imx;
240 struct resource *r; 240 struct resource *r;
241 int ret = 0; 241 int ret = 0;
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 015a82235620..14106440294f 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -49,9 +49,24 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
49 c = 0; /* 0 set division by 256 */ 49 c = 0; /* 0 set division by 256 */
50 period_cycles = c; 50 period_cycles = c;
51 51
52 /* The duty-cycle value is as follows:
53 *
54 * DUTY-CYCLE HIGH LEVEL
55 * 1 99.9%
56 * 25 90.0%
57 * 128 50.0%
58 * 220 10.0%
59 * 255 0.1%
60 * 0 0.0%
61 *
62 * In other words, the register value is duty-cycle % 256 with
63 * duty-cycle in the range 1-256.
64 */
52 c = 256 * duty_ns; 65 c = 256 * duty_ns;
53 do_div(c, period_ns); 66 do_div(c, period_ns);
54 duty_cycles = c; 67 if (c > 255)
68 c = 255;
69 duty_cycles = 256 - c;
55 70
56 writel(PWM_ENABLE | PWM_RELOADV(period_cycles) | PWM_DUTY(duty_cycles), 71 writel(PWM_ENABLE | PWM_RELOADV(period_cycles) | PWM_DUTY(duty_cycles),
57 lpc32xx->base + (pwm->hwpwm << 2)); 72 lpc32xx->base + (pwm->hwpwm << 2));
@@ -106,6 +121,7 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
106 lpc32xx->chip.dev = &pdev->dev; 121 lpc32xx->chip.dev = &pdev->dev;
107 lpc32xx->chip.ops = &lpc32xx_pwm_ops; 122 lpc32xx->chip.ops = &lpc32xx_pwm_ops;
108 lpc32xx->chip.npwm = 2; 123 lpc32xx->chip.npwm = 2;
124 lpc32xx->chip.base = -1;
109 125
110 ret = pwmchip_add(&lpc32xx->chip); 126 ret = pwmchip_add(&lpc32xx->chip);
111 if (ret < 0) { 127 if (ret < 0) {
@@ -121,8 +137,11 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
121static int lpc32xx_pwm_remove(struct platform_device *pdev) 137static int lpc32xx_pwm_remove(struct platform_device *pdev)
122{ 138{
123 struct lpc32xx_pwm_chip *lpc32xx = platform_get_drvdata(pdev); 139 struct lpc32xx_pwm_chip *lpc32xx = platform_get_drvdata(pdev);
140 unsigned int i;
141
142 for (i = 0; i < lpc32xx->chip.npwm; i++)
143 pwm_disable(&lpc32xx->chip.pwms[i]);
124 144
125 clk_disable(lpc32xx->clk);
126 return pwmchip_remove(&lpc32xx->chip); 145 return pwmchip_remove(&lpc32xx->chip);
127} 146}
128 147
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index e9b15d099c03..5207e6cd8648 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -222,6 +222,7 @@ static int s3c_pwm_probe(struct platform_device *pdev)
222 222
223 /* calculate base of control bits in TCON */ 223 /* calculate base of control bits in TCON */
224 s3c->tcon_base = id == 0 ? 0 : (id * 4) + 4; 224 s3c->tcon_base = id == 0 ? 0 : (id * 4) + 4;
225 s3c->pwm_id = id;
225 s3c->chip.dev = &pdev->dev; 226 s3c->chip.dev = &pdev->dev;
226 s3c->chip.ops = &s3c_pwm_ops; 227 s3c->chip.ops = &s3c_pwm_ops;
227 s3c->chip.base = -1; 228 s3c->chip.base = -1;
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
new file mode 100644
index 000000000000..83b21d9d5cf9
--- /dev/null
+++ b/drivers/pwm/pwm-spear.c
@@ -0,0 +1,276 @@
1/*
2 * ST Microelectronics SPEAr Pulse Width Modulator driver
3 *
4 * Copyright (C) 2012 ST Microelectronics
5 * Shiraz Hashim <shiraz.hashim@st.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/clk.h>
13#include <linux/err.h>
14#include <linux/io.h>
15#include <linux/ioport.h>
16#include <linux/kernel.h>
17#include <linux/math64.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/platform_device.h>
21#include <linux/pwm.h>
22#include <linux/slab.h>
23#include <linux/types.h>
24
25#define NUM_PWM 4
26
27/* PWM registers and bits definitions */
28#define PWMCR 0x00 /* Control Register */
29#define PWMCR_PWM_ENABLE 0x1
30#define PWMCR_PRESCALE_SHIFT 2
31#define PWMCR_MIN_PRESCALE 0x00
32#define PWMCR_MAX_PRESCALE 0x3FFF
33
34#define PWMDCR 0x04 /* Duty Cycle Register */
35#define PWMDCR_MIN_DUTY 0x0001
36#define PWMDCR_MAX_DUTY 0xFFFF
37
38#define PWMPCR 0x08 /* Period Register */
39#define PWMPCR_MIN_PERIOD 0x0001
40#define PWMPCR_MAX_PERIOD 0xFFFF
41
42/* Following only available on 13xx SoCs */
43#define PWMMCR 0x3C /* Master Control Register */
44#define PWMMCR_PWM_ENABLE 0x1
45
46/**
47 * struct spear_pwm_chip - struct representing pwm chip
48 *
49 * @mmio_base: base address of pwm chip
50 * @clk: pointer to clk structure of pwm chip
51 * @chip: linux pwm chip representation
52 * @dev: pointer to device structure of pwm chip
53 */
54struct spear_pwm_chip {
55 void __iomem *mmio_base;
56 struct clk *clk;
57 struct pwm_chip chip;
58 struct device *dev;
59};
60
61static inline struct spear_pwm_chip *to_spear_pwm_chip(struct pwm_chip *chip)
62{
63 return container_of(chip, struct spear_pwm_chip, chip);
64}
65
66static inline u32 spear_pwm_readl(struct spear_pwm_chip *chip, unsigned int num,
67 unsigned long offset)
68{
69 return readl_relaxed(chip->mmio_base + (num << 4) + offset);
70}
71
72static inline void spear_pwm_writel(struct spear_pwm_chip *chip,
73 unsigned int num, unsigned long offset,
74 unsigned long val)
75{
76 writel_relaxed(val, chip->mmio_base + (num << 4) + offset);
77}
78
79static int spear_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
80 int duty_ns, int period_ns)
81{
82 struct spear_pwm_chip *pc = to_spear_pwm_chip(chip);
83 u64 val, div, clk_rate;
84 unsigned long prescale = PWMCR_MIN_PRESCALE, pv, dc;
85 int ret;
86
87 /*
88 * Find pv, dc and prescale to suit duty_ns and period_ns. This is done
89 * according to formulas described below:
90 *
91 * period_ns = 10^9 * (PRESCALE + 1) * PV / PWM_CLK_RATE
92 * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
93 *
94 * PV = (PWM_CLK_RATE * period_ns) / (10^9 * (PRESCALE + 1))
95 * DC = (PWM_CLK_RATE * duty_ns) / (10^9 * (PRESCALE + 1))
96 */
97 clk_rate = clk_get_rate(pc->clk);
98 while (1) {
99 div = 1000000000;
100 div *= 1 + prescale;
101 val = clk_rate * period_ns;
102 pv = div64_u64(val, div);
103 val = clk_rate * duty_ns;
104 dc = div64_u64(val, div);
105
106 /* if duty_ns and period_ns are not achievable then return */
107 if (pv < PWMPCR_MIN_PERIOD || dc < PWMDCR_MIN_DUTY)
108 return -EINVAL;
109
110 /*
111 * if pv and dc have crossed their upper limit, then increase
112 * prescale and recalculate pv and dc.
113 */
114 if (pv > PWMPCR_MAX_PERIOD || dc > PWMDCR_MAX_DUTY) {
115 if (++prescale > PWMCR_MAX_PRESCALE)
116 return -EINVAL;
117 continue;
118 }
119 break;
120 }
121
122 /*
123 * NOTE: the clock to PWM has to be enabled first before writing to the
124 * registers.
125 */
126 ret = clk_enable(pc->clk);
127 if (ret)
128 return ret;
129
130 spear_pwm_writel(pc, pwm->hwpwm, PWMCR,
131 prescale << PWMCR_PRESCALE_SHIFT);
132 spear_pwm_writel(pc, pwm->hwpwm, PWMDCR, dc);
133 spear_pwm_writel(pc, pwm->hwpwm, PWMPCR, pv);
134 clk_disable(pc->clk);
135
136 return 0;
137}
138
139static int spear_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
140{
141 struct spear_pwm_chip *pc = to_spear_pwm_chip(chip);
142 int rc = 0;
143 u32 val;
144
145 rc = clk_enable(pc->clk);
146 if (!rc)
147 return rc;
148
149 val = spear_pwm_readl(pc, pwm->hwpwm, PWMCR);
150 val |= PWMCR_PWM_ENABLE;
151 spear_pwm_writel(pc, pwm->hwpwm, PWMCR, val);
152
153 return 0;
154}
155
156static void spear_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
157{
158 struct spear_pwm_chip *pc = to_spear_pwm_chip(chip);
159 u32 val;
160
161 val = spear_pwm_readl(pc, pwm->hwpwm, PWMCR);
162 val &= ~PWMCR_PWM_ENABLE;
163 spear_pwm_writel(pc, pwm->hwpwm, PWMCR, val);
164
165 clk_disable(pc->clk);
166}
167
168static const struct pwm_ops spear_pwm_ops = {
169 .config = spear_pwm_config,
170 .enable = spear_pwm_enable,
171 .disable = spear_pwm_disable,
172 .owner = THIS_MODULE,
173};
174
175static int spear_pwm_probe(struct platform_device *pdev)
176{
177 struct device_node *np = pdev->dev.of_node;
178 struct spear_pwm_chip *pc;
179 struct resource *r;
180 int ret;
181 u32 val;
182
183 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
184 if (!r) {
185 dev_err(&pdev->dev, "no memory resources defined\n");
186 return -ENODEV;
187 }
188
189 pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
190 if (!pc) {
191 dev_err(&pdev->dev, "failed to allocate memory\n");
192 return -ENOMEM;
193 }
194
195 pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
196 if (!pc->mmio_base)
197 return -EADDRNOTAVAIL;
198
199 pc->clk = devm_clk_get(&pdev->dev, NULL);
200 if (IS_ERR(pc->clk))
201 return PTR_ERR(pc->clk);
202
203 pc->dev = &pdev->dev;
204 platform_set_drvdata(pdev, pc);
205
206 pc->chip.dev = &pdev->dev;
207 pc->chip.ops = &spear_pwm_ops;
208 pc->chip.base = -1;
209 pc->chip.npwm = NUM_PWM;
210
211 ret = clk_prepare(pc->clk);
212 if (!ret)
213 return ret;
214
215 if (of_device_is_compatible(np, "st,spear1340-pwm")) {
216 ret = clk_enable(pc->clk);
217 if (!ret) {
218 clk_unprepare(pc->clk);
219 return ret;
220 }
221 /*
222 * Following enables PWM chip, channels would still be
223 * enabled individually through their control register
224 */
225 val = readl_relaxed(pc->mmio_base + PWMMCR);
226 val |= PWMMCR_PWM_ENABLE;
227 writel_relaxed(val, pc->mmio_base + PWMMCR);
228
229 clk_disable(pc->clk);
230 }
231
232 ret = pwmchip_add(&pc->chip);
233 if (!ret) {
234 clk_unprepare(pc->clk);
235 dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
236 }
237
238 return ret;
239}
240
241static int spear_pwm_remove(struct platform_device *pdev)
242{
243 struct spear_pwm_chip *pc = platform_get_drvdata(pdev);
244 int i;
245
246 for (i = 0; i < NUM_PWM; i++)
247 pwm_disable(&pc->chip.pwms[i]);
248
249 /* clk was prepared in probe, hence unprepare it here */
250 clk_unprepare(pc->clk);
251 return pwmchip_remove(&pc->chip);
252}
253
254static struct of_device_id spear_pwm_of_match[] = {
255 { .compatible = "st,spear320-pwm" },
256 { .compatible = "st,spear1340-pwm" },
257 { }
258};
259
260MODULE_DEVICE_TABLE(of, spear_pwm_of_match);
261
262static struct platform_driver spear_pwm_driver = {
263 .driver = {
264 .name = "spear-pwm",
265 .of_match_table = spear_pwm_of_match,
266 },
267 .probe = spear_pwm_probe,
268 .remove = spear_pwm_remove,
269};
270
271module_platform_driver(spear_pwm_driver);
272
273MODULE_LICENSE("GPL");
274MODULE_AUTHOR("Shiraz Hashim <shiraz.hashim@st.com>");
275MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.com>");
276MODULE_ALIAS("platform:spear-pwm");
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 87c091b245cc..5cf016dd9822 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -25,6 +25,10 @@
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/pwm.h> 27#include <linux/pwm.h>
28#include <linux/of_device.h>
29#include <linux/pinctrl/consumer.h>
30
31#include "pwm-tipwmss.h"
28 32
29/* ECAP registers and bits definitions */ 33/* ECAP registers and bits definitions */
30#define CAP1 0x08 34#define CAP1 0x08
@@ -184,12 +188,24 @@ static const struct pwm_ops ecap_pwm_ops = {
184 .owner = THIS_MODULE, 188 .owner = THIS_MODULE,
185}; 189};
186 190
191static const struct of_device_id ecap_of_match[] = {
192 { .compatible = "ti,am33xx-ecap" },
193 {},
194};
195MODULE_DEVICE_TABLE(of, ecap_of_match);
196
187static int ecap_pwm_probe(struct platform_device *pdev) 197static int ecap_pwm_probe(struct platform_device *pdev)
188{ 198{
189 int ret; 199 int ret;
190 struct resource *r; 200 struct resource *r;
191 struct clk *clk; 201 struct clk *clk;
192 struct ecap_pwm_chip *pc; 202 struct ecap_pwm_chip *pc;
203 u16 status;
204 struct pinctrl *pinctrl;
205
206 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
207 if (IS_ERR(pinctrl))
208 dev_warn(&pdev->dev, "unable to select pin group\n");
193 209
194 pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL); 210 pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
195 if (!pc) { 211 if (!pc) {
@@ -211,6 +227,8 @@ static int ecap_pwm_probe(struct platform_device *pdev)
211 227
212 pc->chip.dev = &pdev->dev; 228 pc->chip.dev = &pdev->dev;
213 pc->chip.ops = &ecap_pwm_ops; 229 pc->chip.ops = &ecap_pwm_ops;
230 pc->chip.of_xlate = of_pwm_xlate_with_flags;
231 pc->chip.of_pwm_n_cells = 3;
214 pc->chip.base = -1; 232 pc->chip.base = -1;
215 pc->chip.npwm = 1; 233 pc->chip.npwm = 1;
216 234
@@ -231,14 +249,40 @@ static int ecap_pwm_probe(struct platform_device *pdev)
231 } 249 }
232 250
233 pm_runtime_enable(&pdev->dev); 251 pm_runtime_enable(&pdev->dev);
252 pm_runtime_get_sync(&pdev->dev);
253
254 status = pwmss_submodule_state_change(pdev->dev.parent,
255 PWMSS_ECAPCLK_EN);
256 if (!(status & PWMSS_ECAPCLK_EN_ACK)) {
257 dev_err(&pdev->dev, "PWMSS config space clock enable failed\n");
258 ret = -EINVAL;
259 goto pwmss_clk_failure;
260 }
261
262 pm_runtime_put_sync(&pdev->dev);
263
234 platform_set_drvdata(pdev, pc); 264 platform_set_drvdata(pdev, pc);
235 return 0; 265 return 0;
266
267pwmss_clk_failure:
268 pm_runtime_put_sync(&pdev->dev);
269 pm_runtime_disable(&pdev->dev);
270 pwmchip_remove(&pc->chip);
271 return ret;
236} 272}
237 273
238static int ecap_pwm_remove(struct platform_device *pdev) 274static int ecap_pwm_remove(struct platform_device *pdev)
239{ 275{
240 struct ecap_pwm_chip *pc = platform_get_drvdata(pdev); 276 struct ecap_pwm_chip *pc = platform_get_drvdata(pdev);
241 277
278 pm_runtime_get_sync(&pdev->dev);
279 /*
280 * Due to hardware misbehaviour, acknowledge of the stop_req
281 * is missing. Hence checking of the status bit skipped.
282 */
283 pwmss_submodule_state_change(pdev->dev.parent, PWMSS_ECAPCLK_STOP_REQ);
284 pm_runtime_put_sync(&pdev->dev);
285
242 pm_runtime_put_sync(&pdev->dev); 286 pm_runtime_put_sync(&pdev->dev);
243 pm_runtime_disable(&pdev->dev); 287 pm_runtime_disable(&pdev->dev);
244 return pwmchip_remove(&pc->chip); 288 return pwmchip_remove(&pc->chip);
@@ -246,7 +290,9 @@ static int ecap_pwm_remove(struct platform_device *pdev)
246 290
247static struct platform_driver ecap_pwm_driver = { 291static struct platform_driver ecap_pwm_driver = {
248 .driver = { 292 .driver = {
249 .name = "ecap", 293 .name = "ecap",
294 .owner = THIS_MODULE,
295 .of_match_table = ecap_of_match,
250 }, 296 },
251 .probe = ecap_pwm_probe, 297 .probe = ecap_pwm_probe,
252 .remove = ecap_pwm_remove, 298 .remove = ecap_pwm_remove,
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 9ffd389d0c8b..72a6dd40c9ec 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -25,6 +25,10 @@
25#include <linux/err.h> 25#include <linux/err.h>
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/pm_runtime.h> 27#include <linux/pm_runtime.h>
28#include <linux/of_device.h>
29#include <linux/pinctrl/consumer.h>
30
31#include "pwm-tipwmss.h"
28 32
29/* EHRPWM registers and bits definitions */ 33/* EHRPWM registers and bits definitions */
30 34
@@ -115,6 +119,7 @@ struct ehrpwm_pwm_chip {
115 void __iomem *mmio_base; 119 void __iomem *mmio_base;
116 unsigned long period_cycles[NUM_PWM_CHANNEL]; 120 unsigned long period_cycles[NUM_PWM_CHANNEL];
117 enum pwm_polarity polarity[NUM_PWM_CHANNEL]; 121 enum pwm_polarity polarity[NUM_PWM_CHANNEL];
122 struct clk *tbclk;
118}; 123};
119 124
120static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip) 125static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip)
@@ -335,6 +340,9 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
335 /* Channels polarity can be configured from action qualifier module */ 340 /* Channels polarity can be configured from action qualifier module */
336 configure_polarity(pc, pwm->hwpwm); 341 configure_polarity(pc, pwm->hwpwm);
337 342
343 /* Enable TBCLK before enabling PWM device */
344 clk_enable(pc->tbclk);
345
338 /* Enable time counter for free_run */ 346 /* Enable time counter for free_run */
339 ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN); 347 ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN);
340 return 0; 348 return 0;
@@ -363,6 +371,9 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
363 371
364 ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val); 372 ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
365 373
374 /* Disabling TBCLK on PWM disable */
375 clk_disable(pc->tbclk);
376
366 /* Stop Time base counter */ 377 /* Stop Time base counter */
367 ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT); 378 ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT);
368 379
@@ -392,12 +403,24 @@ static const struct pwm_ops ehrpwm_pwm_ops = {
392 .owner = THIS_MODULE, 403 .owner = THIS_MODULE,
393}; 404};
394 405
406static const struct of_device_id ehrpwm_of_match[] = {
407 { .compatible = "ti,am33xx-ehrpwm" },
408 {},
409};
410MODULE_DEVICE_TABLE(of, ehrpwm_of_match);
411
395static int ehrpwm_pwm_probe(struct platform_device *pdev) 412static int ehrpwm_pwm_probe(struct platform_device *pdev)
396{ 413{
397 int ret; 414 int ret;
398 struct resource *r; 415 struct resource *r;
399 struct clk *clk; 416 struct clk *clk;
400 struct ehrpwm_pwm_chip *pc; 417 struct ehrpwm_pwm_chip *pc;
418 u16 status;
419 struct pinctrl *pinctrl;
420
421 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
422 if (IS_ERR(pinctrl))
423 dev_warn(&pdev->dev, "unable to select pin group\n");
401 424
402 pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL); 425 pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
403 if (!pc) { 426 if (!pc) {
@@ -419,6 +442,8 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
419 442
420 pc->chip.dev = &pdev->dev; 443 pc->chip.dev = &pdev->dev;
421 pc->chip.ops = &ehrpwm_pwm_ops; 444 pc->chip.ops = &ehrpwm_pwm_ops;
445 pc->chip.of_xlate = of_pwm_xlate_with_flags;
446 pc->chip.of_pwm_n_cells = 3;
422 pc->chip.base = -1; 447 pc->chip.base = -1;
423 pc->chip.npwm = NUM_PWM_CHANNEL; 448 pc->chip.npwm = NUM_PWM_CHANNEL;
424 449
@@ -432,6 +457,13 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
432 if (!pc->mmio_base) 457 if (!pc->mmio_base)
433 return -EADDRNOTAVAIL; 458 return -EADDRNOTAVAIL;
434 459
460 /* Acquire tbclk for Time Base EHRPWM submodule */
461 pc->tbclk = devm_clk_get(&pdev->dev, "tbclk");
462 if (IS_ERR(pc->tbclk)) {
463 dev_err(&pdev->dev, "Failed to get tbclk\n");
464 return PTR_ERR(pc->tbclk);
465 }
466
435 ret = pwmchip_add(&pc->chip); 467 ret = pwmchip_add(&pc->chip);
436 if (ret < 0) { 468 if (ret < 0) {
437 dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); 469 dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
@@ -439,14 +471,40 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
439 } 471 }
440 472
441 pm_runtime_enable(&pdev->dev); 473 pm_runtime_enable(&pdev->dev);
474 pm_runtime_get_sync(&pdev->dev);
475
476 status = pwmss_submodule_state_change(pdev->dev.parent,
477 PWMSS_EPWMCLK_EN);
478 if (!(status & PWMSS_EPWMCLK_EN_ACK)) {
479 dev_err(&pdev->dev, "PWMSS config space clock enable failed\n");
480 ret = -EINVAL;
481 goto pwmss_clk_failure;
482 }
483
484 pm_runtime_put_sync(&pdev->dev);
485
442 platform_set_drvdata(pdev, pc); 486 platform_set_drvdata(pdev, pc);
443 return 0; 487 return 0;
488
489pwmss_clk_failure:
490 pm_runtime_put_sync(&pdev->dev);
491 pm_runtime_disable(&pdev->dev);
492 pwmchip_remove(&pc->chip);
493 return ret;
444} 494}
445 495
446static int ehrpwm_pwm_remove(struct platform_device *pdev) 496static int ehrpwm_pwm_remove(struct platform_device *pdev)
447{ 497{
448 struct ehrpwm_pwm_chip *pc = platform_get_drvdata(pdev); 498 struct ehrpwm_pwm_chip *pc = platform_get_drvdata(pdev);
449 499
500 pm_runtime_get_sync(&pdev->dev);
501 /*
502 * Due to hardware misbehaviour, acknowledge of the stop_req
503 * is missing. Hence checking of the status bit skipped.
504 */
505 pwmss_submodule_state_change(pdev->dev.parent, PWMSS_EPWMCLK_STOP_REQ);
506 pm_runtime_put_sync(&pdev->dev);
507
450 pm_runtime_put_sync(&pdev->dev); 508 pm_runtime_put_sync(&pdev->dev);
451 pm_runtime_disable(&pdev->dev); 509 pm_runtime_disable(&pdev->dev);
452 return pwmchip_remove(&pc->chip); 510 return pwmchip_remove(&pc->chip);
@@ -454,7 +512,9 @@ static int ehrpwm_pwm_remove(struct platform_device *pdev)
454 512
455static struct platform_driver ehrpwm_pwm_driver = { 513static struct platform_driver ehrpwm_pwm_driver = {
456 .driver = { 514 .driver = {
457 .name = "ehrpwm", 515 .name = "ehrpwm",
516 .owner = THIS_MODULE,
517 .of_match_table = ehrpwm_of_match,
458 }, 518 },
459 .probe = ehrpwm_pwm_probe, 519 .probe = ehrpwm_pwm_probe,
460 .remove = ehrpwm_pwm_remove, 520 .remove = ehrpwm_pwm_remove,
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c
new file mode 100644
index 000000000000..3448a1c88590
--- /dev/null
+++ b/drivers/pwm/pwm-tipwmss.c
@@ -0,0 +1,139 @@
1/*
2 * TI PWM Subsystem driver
3 *
4 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/io.h>
21#include <linux/err.h>
22#include <linux/pm_runtime.h>
23#include <linux/of_device.h>
24
25#include "pwm-tipwmss.h"
26
27#define PWMSS_CLKCONFIG 0x8 /* Clock gating reg */
28#define PWMSS_CLKSTATUS 0xc /* Clock gating status reg */
29
30struct pwmss_info {
31 void __iomem *mmio_base;
32 struct mutex pwmss_lock;
33 u16 pwmss_clkconfig;
34};
35
36u16 pwmss_submodule_state_change(struct device *dev, int set)
37{
38 struct pwmss_info *info = dev_get_drvdata(dev);
39 u16 val;
40
41 mutex_lock(&info->pwmss_lock);
42 val = readw(info->mmio_base + PWMSS_CLKCONFIG);
43 val |= set;
44 writew(val , info->mmio_base + PWMSS_CLKCONFIG);
45 mutex_unlock(&info->pwmss_lock);
46
47 return readw(info->mmio_base + PWMSS_CLKSTATUS);
48}
49EXPORT_SYMBOL(pwmss_submodule_state_change);
50
51static const struct of_device_id pwmss_of_match[] = {
52 { .compatible = "ti,am33xx-pwmss" },
53 {},
54};
55MODULE_DEVICE_TABLE(of, pwmss_of_match);
56
57static int pwmss_probe(struct platform_device *pdev)
58{
59 int ret;
60 struct resource *r;
61 struct pwmss_info *info;
62 struct device_node *node = pdev->dev.of_node;
63
64 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
65 if (!info) {
66 dev_err(&pdev->dev, "failed to allocate memory\n");
67 return -ENOMEM;
68 }
69
70 mutex_init(&info->pwmss_lock);
71
72 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
73 if (!r) {
74 dev_err(&pdev->dev, "no memory resource defined\n");
75 return -ENODEV;
76 }
77
78 info->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
79 if (!info->mmio_base)
80 return -EADDRNOTAVAIL;
81
82 pm_runtime_enable(&pdev->dev);
83 pm_runtime_get_sync(&pdev->dev);
84 platform_set_drvdata(pdev, info);
85
86 /* Populate all the child nodes here... */
87 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
88 if (ret)
89 dev_err(&pdev->dev, "no child node found\n");
90
91 return ret;
92}
93
94static int pwmss_remove(struct platform_device *pdev)
95{
96 struct pwmss_info *info = platform_get_drvdata(pdev);
97
98 pm_runtime_put_sync(&pdev->dev);
99 pm_runtime_disable(&pdev->dev);
100 mutex_destroy(&info->pwmss_lock);
101 return 0;
102}
103
104static int pwmss_suspend(struct device *dev)
105{
106 struct pwmss_info *info = dev_get_drvdata(dev);
107
108 info->pwmss_clkconfig = readw(info->mmio_base + PWMSS_CLKCONFIG);
109 pm_runtime_put_sync(dev);
110 return 0;
111}
112
113static int pwmss_resume(struct device *dev)
114{
115 struct pwmss_info *info = dev_get_drvdata(dev);
116
117 pm_runtime_get_sync(dev);
118 writew(info->pwmss_clkconfig, info->mmio_base + PWMSS_CLKCONFIG);
119 return 0;
120}
121
122static SIMPLE_DEV_PM_OPS(pwmss_pm_ops, pwmss_suspend, pwmss_resume);
123
124static struct platform_driver pwmss_driver = {
125 .driver = {
126 .name = "pwmss",
127 .owner = THIS_MODULE,
128 .pm = &pwmss_pm_ops,
129 .of_match_table = pwmss_of_match,
130 },
131 .probe = pwmss_probe,
132 .remove = pwmss_remove,
133};
134
135module_platform_driver(pwmss_driver);
136
137MODULE_DESCRIPTION("PWM Subsystem driver");
138MODULE_AUTHOR("Texas Instruments");
139MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-tipwmss.h b/drivers/pwm/pwm-tipwmss.h
new file mode 100644
index 000000000000..11f76a1e266b
--- /dev/null
+++ b/drivers/pwm/pwm-tipwmss.h
@@ -0,0 +1,39 @@
1/*
2 * TI PWM Subsystem driver
3 *
4 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#ifndef __TIPWMSS_H
19#define __TIPWMSS_H
20
21#ifdef CONFIG_PWM_TIPWMSS
22/* PWM substem clock gating */
23#define PWMSS_ECAPCLK_EN BIT(0)
24#define PWMSS_ECAPCLK_STOP_REQ BIT(1)
25#define PWMSS_EPWMCLK_EN BIT(8)
26#define PWMSS_EPWMCLK_STOP_REQ BIT(9)
27
28#define PWMSS_ECAPCLK_EN_ACK BIT(0)
29#define PWMSS_EPWMCLK_EN_ACK BIT(8)
30
31extern u16 pwmss_submodule_state_change(struct device *dev, int set);
32#else
33static inline u16 pwmss_submodule_state_change(struct device *dev, int set)
34{
35 /* return success status value */
36 return 0xFFFF;
37}
38#endif
39#endif /* __TIPWMSS_H */
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
new file mode 100644
index 000000000000..9dfa0f3eca30
--- /dev/null
+++ b/drivers/pwm/pwm-twl-led.c
@@ -0,0 +1,344 @@
/*
 * Driver for TWL4030/6030 Pulse Width Modulator used as LED driver
 *
 * Copyright (C) 2012 Texas Instruments
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 *
 * This driver is a complete rewrite of the former pwm-twl6030.c authored by:
 * Hemanth V <hemanthv@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/i2c/twl.h>
#include <linux/slab.h>

/*
 * This driver handles the PWM driven LED terminals of TWL4030 and TWL6030.
 * To generate the signal on TWL4030:
 *  - LEDA uses PWMA
 *  - LEDB uses PWMB
 * TWL6030 has one LED pin with dedicated LEDPWM
 */

/* Counter range of the respective LED PWMs */
#define TWL4030_LED_MAX		0x7f
#define TWL6030_LED_MAX		0xff

/* Registers, bits and macro for TWL4030 */
#define TWL4030_LEDEN_REG	0x00
#define TWL4030_PWMA_REG	0x01

#define TWL4030_LEDXON		(1 << 0)
#define TWL4030_LEDXPWM		(1 << 4)
#define TWL4030_LED_PINS	(TWL4030_LEDXON | TWL4030_LEDXPWM)
/* Shift the per-LED bit pair to the slot of LED instance "led" */
#define TWL4030_LED_TOGGLE(led, x)	((x) << (led))

/* Register, bits and macro for TWL6030 */
#define TWL6030_LED_PWM_CTRL1	0xf4
#define TWL6030_LED_PWM_CTRL2	0xf5

/*
 * NOTE(review): the TWL6040_* names below are written to TWL6030
 * registers; the "6040" prefix looks like a naming slip kept as-is to
 * avoid churn in the users below.
 */
#define TWL6040_LED_MODE_HW	0x00
#define TWL6040_LED_MODE_ON	0x01
#define TWL6040_LED_MODE_OFF	0x02
#define TWL6040_LED_MODE_MASK	0x03

struct twl_pwmled_chip {
	struct pwm_chip chip;
	struct mutex mutex;	/* serializes I2C read-modify-write cycles */
};

/* Recover our chip state from the embedded generic pwm_chip. */
static inline struct twl_pwmled_chip *to_twl(struct pwm_chip *chip)
{
	return container_of(chip, struct twl_pwmled_chip, chip);
}
67
/*
 * pwm_ops.config for TWL4030: program the on/off cycle registers of the
 * selected LED PWM. The on-cycle is pinned at 1; only the off-cycle is
 * derived from the requested duty.
 *
 * NOTE(review): duty_ns * TWL4030_LED_MAX is evaluated in int, so a
 * duty_ns above ~16.9 ms would overflow — presumably callers stay well
 * below that; confirm before relying on long periods.
 */
static int twl4030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
				 int duty_ns, int period_ns)
{
	int duty_cycle = DIV_ROUND_UP(duty_ns * TWL4030_LED_MAX, period_ns) + 1;
	u8 pwm_config[2] = { 1, 0 };	/* [0] = on-cycle, [1] = off-cycle */
	int base, ret;

	/*
	 * To configure the duty period:
	 * On-cycle is set to 1 (the minimum allowed value)
	 * The off time of 0 is not configurable, so the mapping is:
	 * 0 -> off cycle = 2,
	 * 1 -> off cycle = 2,
	 * 2 -> off cycle = 3,
	 * 126 - > off cycle 127,
	 * 127 - > off cycle 1
	 * When on cycle == off cycle the PWM will be always on
	 */
	if (duty_cycle == 1)
		duty_cycle = 2;
	else if (duty_cycle > TWL4030_LED_MAX)
		duty_cycle = 1;

	/* Two registers per LED, starting at the PWMA register */
	base = pwm->hwpwm * 2 + TWL4030_PWMA_REG;

	pwm_config[1] = duty_cycle;

	/* Write on- and off-cycle in one two-byte transfer */
	ret = twl_i2c_write(TWL4030_MODULE_LED, pwm_config, base, 2);
	if (ret < 0)
		dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);

	return ret;
}
101
102static int twl4030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
103{
104 struct twl_pwmled_chip *twl = to_twl(chip);
105 int ret;
106 u8 val;
107
108 mutex_lock(&twl->mutex);
109 ret = twl_i2c_read_u8(TWL4030_MODULE_LED, &val, TWL4030_LEDEN_REG);
110 if (ret < 0) {
111 dev_err(chip->dev, "%s: Failed to read LEDEN\n", pwm->label);
112 goto out;
113 }
114
115 val |= TWL4030_LED_TOGGLE(pwm->hwpwm, TWL4030_LED_PINS);
116
117 ret = twl_i2c_write_u8(TWL4030_MODULE_LED, val, TWL4030_LEDEN_REG);
118 if (ret < 0)
119 dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
120
121out:
122 mutex_unlock(&twl->mutex);
123 return ret;
124}
125
126static void twl4030_pwmled_disable(struct pwm_chip *chip,
127 struct pwm_device *pwm)
128{
129 struct twl_pwmled_chip *twl = to_twl(chip);
130 int ret;
131 u8 val;
132
133 mutex_lock(&twl->mutex);
134 ret = twl_i2c_read_u8(TWL4030_MODULE_LED, &val, TWL4030_LEDEN_REG);
135 if (ret < 0) {
136 dev_err(chip->dev, "%s: Failed to read LEDEN\n", pwm->label);
137 goto out;
138 }
139
140 val &= ~TWL4030_LED_TOGGLE(pwm->hwpwm, TWL4030_LED_PINS);
141
142 ret = twl_i2c_write_u8(TWL4030_MODULE_LED, val, TWL4030_LEDEN_REG);
143 if (ret < 0)
144 dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
145
146out:
147 mutex_unlock(&twl->mutex);
148}
149
/*
 * pwm_ops.config for TWL6030: the single LED PWM exposes an 8-bit
 * on-time register (LED_PWM_CTRL1); the duty is scaled linearly to
 * 0..TWL6030_LED_MAX.
 *
 * NOTE(review): duty_ns * TWL6030_LED_MAX is int arithmetic and can
 * overflow for duty_ns above ~8.4 ms — confirm expected period range.
 */
static int twl6030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
				 int duty_ns, int period_ns)
{
	int duty_cycle = (duty_ns * TWL6030_LED_MAX) / period_ns;
	u8 on_time;
	int ret;

	on_time = duty_cycle & 0xff;

	ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, on_time,
			       TWL6030_LED_PWM_CTRL1);
	if (ret < 0)
		dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);

	return ret;
}
166
167static int twl6030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
168{
169 struct twl_pwmled_chip *twl = to_twl(chip);
170 int ret;
171 u8 val;
172
173 mutex_lock(&twl->mutex);
174 ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
175 if (ret < 0) {
176 dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
177 pwm->label);
178 goto out;
179 }
180
181 val &= ~TWL6040_LED_MODE_MASK;
182 val |= TWL6040_LED_MODE_ON;
183
184 ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
185 if (ret < 0)
186 dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
187
188out:
189 mutex_unlock(&twl->mutex);
190 return ret;
191}
192
193static void twl6030_pwmled_disable(struct pwm_chip *chip,
194 struct pwm_device *pwm)
195{
196 struct twl_pwmled_chip *twl = to_twl(chip);
197 int ret;
198 u8 val;
199
200 mutex_lock(&twl->mutex);
201 ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
202 if (ret < 0) {
203 dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
204 pwm->label);
205 goto out;
206 }
207
208 val &= ~TWL6040_LED_MODE_MASK;
209 val |= TWL6040_LED_MODE_OFF;
210
211 ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
212 if (ret < 0)
213 dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
214
215out:
216 mutex_unlock(&twl->mutex);
217}
218
219static int twl6030_pwmled_request(struct pwm_chip *chip, struct pwm_device *pwm)
220{
221 struct twl_pwmled_chip *twl = to_twl(chip);
222 int ret;
223 u8 val;
224
225 mutex_lock(&twl->mutex);
226 ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
227 if (ret < 0) {
228 dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
229 pwm->label);
230 goto out;
231 }
232
233 val &= ~TWL6040_LED_MODE_MASK;
234 val |= TWL6040_LED_MODE_OFF;
235
236 ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
237 if (ret < 0)
238 dev_err(chip->dev, "%s: Failed to request PWM\n", pwm->label);
239
240out:
241 mutex_unlock(&twl->mutex);
242 return ret;
243}
244
245static void twl6030_pwmled_free(struct pwm_chip *chip, struct pwm_device *pwm)
246{
247 struct twl_pwmled_chip *twl = to_twl(chip);
248 int ret;
249 u8 val;
250
251 mutex_lock(&twl->mutex);
252 ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
253 if (ret < 0) {
254 dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
255 pwm->label);
256 goto out;
257 }
258
259 val &= ~TWL6040_LED_MODE_MASK;
260 val |= TWL6040_LED_MODE_HW;
261
262 ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
263 if (ret < 0)
264 dev_err(chip->dev, "%s: Failed to free PWM\n", pwm->label);
265
266out:
267 mutex_unlock(&twl->mutex);
268}
269
/* TWL4030: two LED terminals, no extra request/free bookkeeping needed */
static const struct pwm_ops twl4030_pwmled_ops = {
	.enable = twl4030_pwmled_enable,
	.disable = twl4030_pwmled_disable,
	.config = twl4030_pwmled_config,
};

/* TWL6030: one LED pin; request/free switch the LED mode field */
static const struct pwm_ops twl6030_pwmled_ops = {
	.enable = twl6030_pwmled_enable,
	.disable = twl6030_pwmled_disable,
	.config = twl6030_pwmled_config,
	.request = twl6030_pwmled_request,
	.free = twl6030_pwmled_free,
};

/*
 * Bind to the twl-pwmled platform device: pick the ops/PWM count from
 * the detected TWL class and register the chip with the PWM core.
 */
static int twl_pwmled_probe(struct platform_device *pdev)
{
	struct twl_pwmled_chip *twl;
	int ret;

	twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
	if (!twl)
		return -ENOMEM;

	/* TWL4030 exposes two LED terminals, TWL6030 only one */
	if (twl_class_is_4030()) {
		twl->chip.ops = &twl4030_pwmled_ops;
		twl->chip.npwm = 2;
	} else {
		twl->chip.ops = &twl6030_pwmled_ops;
		twl->chip.npwm = 1;
	}

	twl->chip.dev = &pdev->dev;
	twl->chip.base = -1;	/* let the PWM core pick the numbering */

	mutex_init(&twl->mutex);

	ret = pwmchip_add(&twl->chip);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, twl);

	return 0;
}

/* Unbind: unregister the chip; devm frees the state. */
static int twl_pwmled_remove(struct platform_device *pdev)
{
	struct twl_pwmled_chip *twl = platform_get_drvdata(pdev);

	return pwmchip_remove(&twl->chip);
}
321
#ifdef CONFIG_OF
/* DT match table; const — it is never modified and of_match_table takes
 * a const pointer. */
static const struct of_device_id twl_pwmled_of_match[] = {
	{ .compatible = "ti,twl4030-pwmled" },
	{ .compatible = "ti,twl6030-pwmled" },
	{ },
};
MODULE_DEVICE_TABLE(of, twl_pwmled_of_match);
#endif
330
/* Platform-driver glue; of_match_ptr() compiles to NULL without CONFIG_OF */
static struct platform_driver twl_pwmled_driver = {
	.driver = {
		.name = "twl-pwmled",
		.of_match_table = of_match_ptr(twl_pwmled_of_match),
	},
	.probe = twl_pwmled_probe,
	.remove = twl_pwmled_remove,
};
module_platform_driver(twl_pwmled_driver);

MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
MODULE_DESCRIPTION("PWM driver for TWL4030 and TWL6030 LED outputs");
MODULE_ALIAS("platform:twl-pwmled");
MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
new file mode 100644
index 000000000000..e65db95d5e59
--- /dev/null
+++ b/drivers/pwm/pwm-twl.c
@@ -0,0 +1,359 @@
1/*
2 * Driver for TWL4030/6030 Generic Pulse Width Modulator
3 *
4 * Copyright (C) 2012 Texas Instruments
5 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/module.h>
21#include <linux/platform_device.h>
22#include <linux/pwm.h>
23#include <linux/i2c/twl.h>
24#include <linux/slab.h>
25
26/*
27 * This driver handles the PWMs of TWL4030 and TWL6030.
28 * The TRM names for the PWMs on TWL4030 are: PWM0, PWM1
29 * TWL6030 also have two PWMs named in the TRM as PWM1, PWM2
30 */
31
32#define TWL_PWM_MAX 0x7f
33
34/* Registers, bits and macro for TWL4030 */
35#define TWL4030_GPBR1_REG 0x0c
36#define TWL4030_PMBR1_REG 0x0d
37
38/* GPBR1 register bits */
39#define TWL4030_PWMXCLK_ENABLE (1 << 0)
40#define TWL4030_PWMX_ENABLE (1 << 2)
41#define TWL4030_PWMX_BITS (TWL4030_PWMX_ENABLE | TWL4030_PWMXCLK_ENABLE)
42#define TWL4030_PWM_TOGGLE(pwm, x) ((x) << (pwm))
43
44/* PMBR1 register bits */
45#define TWL4030_GPIO6_PWM0_MUTE_MASK (0x03 << 2)
46#define TWL4030_GPIO6_PWM0_MUTE_PWM0 (0x01 << 2)
47#define TWL4030_GPIO7_VIBRASYNC_PWM1_MASK (0x03 << 4)
48#define TWL4030_GPIO7_VIBRASYNC_PWM1_PWM1 (0x03 << 4)
49
50/* Register, bits and macro for TWL6030 */
51#define TWL6030_TOGGLE3_REG 0x92
52
53#define TWL6030_PWMXR (1 << 0)
54#define TWL6030_PWMXS (1 << 1)
55#define TWL6030_PWMXEN (1 << 2)
56#define TWL6030_PWM_TOGGLE(pwm, x) ((x) << (pwm * 3))
57
58struct twl_pwm_chip {
59 struct pwm_chip chip;
60 struct mutex mutex;
61 u8 twl6030_toggle3;
62 u8 twl4030_pwm_mux;
63};
64
65static inline struct twl_pwm_chip *to_twl(struct pwm_chip *chip)
66{
67 return container_of(chip, struct twl_pwm_chip, chip);
68}
69
/*
 * pwm_ops.config, shared by TWL4030 and TWL6030: program the on/off
 * cycle registers of the selected PWM. The on-cycle is pinned at 1 and
 * only the off-cycle is derived from the requested duty.
 *
 * NOTE(review): duty_ns * TWL_PWM_MAX is evaluated in int, so a
 * duty_ns above ~16.9 ms would overflow — presumably callers stay well
 * below that; confirm before relying on long periods.
 */
static int twl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
			      int duty_ns, int period_ns)
{
	int duty_cycle = DIV_ROUND_UP(duty_ns * TWL_PWM_MAX, period_ns) + 1;
	u8 pwm_config[2] = { 1, 0 };	/* [0] = on-cycle, [1] = off-cycle */
	int base, ret;

	/*
	 * To configure the duty period:
	 * On-cycle is set to 1 (the minimum allowed value)
	 * The off time of 0 is not configurable, so the mapping is:
	 * 0 -> off cycle = 2,
	 * 1 -> off cycle = 2,
	 * 2 -> off cycle = 3,
	 * 126 - > off cycle 127,
	 * 127 - > off cycle 1
	 * When on cycle == off cycle the PWM will be always on
	 */
	if (duty_cycle == 1)
		duty_cycle = 2;
	else if (duty_cycle > TWL_PWM_MAX)
		duty_cycle = 1;

	/* Three-register stride per PWM instance within the PWM module */
	base = pwm->hwpwm * 3;

	pwm_config[1] = duty_cycle;

	/* Write on- and off-cycle in one two-byte transfer */
	ret = twl_i2c_write(TWL_MODULE_PWM, pwm_config, base, 2);
	if (ret < 0)
		dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);

	return ret;
}
103
/*
 * pwm_ops.enable for TWL4030: two separate writes to GPBR1 — first
 * ungate the PWM clock, then enable the PWM output.
 */
static int twl4030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct twl_pwm_chip *twl = to_twl(chip);
	int ret;
	u8 val;

	mutex_lock(&twl->mutex);
	ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_GPBR1_REG);
	if (ret < 0) {
		dev_err(chip->dev, "%s: Failed to read GPBR1\n", pwm->label);
		goto out;
	}

	/* Step 1: ungate the clock for this PWM */
	val |= TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMXCLK_ENABLE);

	ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
	if (ret < 0)
		dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);

	/*
	 * Step 2: enable the PWM output with a second write.
	 * NOTE(review): a failure of the first write is only logged, the
	 * second write is attempted regardless — presumably deliberate
	 * best-effort; confirm.
	 */
	val |= TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMX_ENABLE);

	ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
	if (ret < 0)
		dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);

out:
	mutex_unlock(&twl->mutex);
	return ret;
}

/*
 * pwm_ops.disable for TWL4030: mirror of enable — drop the PWM output
 * enable first, then gate its clock, again as two separate writes.
 */
static void twl4030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct twl_pwm_chip *twl = to_twl(chip);
	int ret;
	u8 val;

	mutex_lock(&twl->mutex);
	ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_GPBR1_REG);
	if (ret < 0) {
		dev_err(chip->dev, "%s: Failed to read GPBR1\n", pwm->label);
		goto out;
	}

	/* Step 1: disable the PWM output */
	val &= ~TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMX_ENABLE);

	ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
	if (ret < 0)
		dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);

	/* Step 2: gate the clock for this PWM */
	val &= ~TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMXCLK_ENABLE);

	ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
	if (ret < 0)
		dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);

out:
	mutex_unlock(&twl->mutex);
}
162
163static int twl4030_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
164{
165 struct twl_pwm_chip *twl = to_twl(chip);
166 int ret;
167 u8 val, mask, bits;
168
169 if (pwm->hwpwm == 1) {
170 mask = TWL4030_GPIO7_VIBRASYNC_PWM1_MASK;
171 bits = TWL4030_GPIO7_VIBRASYNC_PWM1_PWM1;
172 } else {
173 mask = TWL4030_GPIO6_PWM0_MUTE_MASK;
174 bits = TWL4030_GPIO6_PWM0_MUTE_PWM0;
175 }
176
177 mutex_lock(&twl->mutex);
178 ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_PMBR1_REG);
179 if (ret < 0) {
180 dev_err(chip->dev, "%s: Failed to read PMBR1\n", pwm->label);
181 goto out;
182 }
183
184 /* Save the current MUX configuration for the PWM */
185 twl->twl4030_pwm_mux &= ~mask;
186 twl->twl4030_pwm_mux |= (val & mask);
187
188 /* Select PWM functionality */
189 val &= ~mask;
190 val |= bits;
191
192 ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_PMBR1_REG);
193 if (ret < 0)
194 dev_err(chip->dev, "%s: Failed to request PWM\n", pwm->label);
195
196out:
197 mutex_unlock(&twl->mutex);
198 return ret;
199}
200
201static void twl4030_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
202{
203 struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
204 chip);
205 int ret;
206 u8 val, mask;
207
208 if (pwm->hwpwm == 1)
209 mask = TWL4030_GPIO7_VIBRASYNC_PWM1_MASK;
210 else
211 mask = TWL4030_GPIO6_PWM0_MUTE_MASK;
212
213 mutex_lock(&twl->mutex);
214 ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_PMBR1_REG);
215 if (ret < 0) {
216 dev_err(chip->dev, "%s: Failed to read PMBR1\n", pwm->label);
217 goto out;
218 }
219
220 /* Restore the MUX configuration for the PWM */
221 val &= ~mask;
222 val |= (twl->twl4030_pwm_mux & mask);
223
224 ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_PMBR1_REG);
225 if (ret < 0)
226 dev_err(chip->dev, "%s: Failed to free PWM\n", pwm->label);
227
228out:
229 mutex_unlock(&twl->mutex);
230}
231
232static int twl6030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
233{
234 struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
235 chip);
236 int ret;
237 u8 val;
238
239 mutex_lock(&twl->mutex);
240 val = twl->twl6030_toggle3;
241 val |= TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXS | TWL6030_PWMXEN);
242 val &= ~TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXR);
243
244 ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
245 if (ret < 0) {
246 dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
247 goto out;
248 }
249
250 twl->twl6030_toggle3 = val;
251out:
252 mutex_unlock(&twl->mutex);
253 return 0;
254}
255
256static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
257{
258 struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
259 chip);
260 int ret;
261 u8 val;
262
263 mutex_lock(&twl->mutex);
264 val = twl->twl6030_toggle3;
265 val |= TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXR);
266 val &= ~TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXS | TWL6030_PWMXEN);
267
268 ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
269 if (ret < 0) {
270 dev_err(chip->dev, "%s: Failed to read TOGGLE3\n", pwm->label);
271 goto out;
272 }
273
274 val |= TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXS | TWL6030_PWMXEN);
275
276 ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
277 if (ret < 0) {
278 dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
279 goto out;
280 }
281
282 twl->twl6030_toggle3 = val;
283out:
284 mutex_unlock(&twl->mutex);
285}
286
/* TWL4030: request/free manage the shared GPIO/PWM pad mux */
static const struct pwm_ops twl4030_pwm_ops = {
	.config = twl_pwm_config,
	.enable = twl4030_pwm_enable,
	.disable = twl4030_pwm_disable,
	.request = twl4030_pwm_request,
	.free = twl4030_pwm_free,
};

/* TWL6030: dedicated pins, so no request/free hooks needed */
static const struct pwm_ops twl6030_pwm_ops = {
	.config = twl_pwm_config,
	.enable = twl6030_pwm_enable,
	.disable = twl6030_pwm_disable,
};

/*
 * Bind to the twl-pwm platform device: pick the ops from the detected
 * TWL class (both variants expose two PWMs) and register with the PWM
 * core.
 */
static int twl_pwm_probe(struct platform_device *pdev)
{
	struct twl_pwm_chip *twl;
	int ret;

	twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
	if (!twl)
		return -ENOMEM;

	if (twl_class_is_4030())
		twl->chip.ops = &twl4030_pwm_ops;
	else
		twl->chip.ops = &twl6030_pwm_ops;

	twl->chip.dev = &pdev->dev;
	twl->chip.base = -1;	/* let the PWM core pick the numbering */
	twl->chip.npwm = 2;

	mutex_init(&twl->mutex);

	ret = pwmchip_add(&twl->chip);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, twl);

	return 0;
}

/* Unbind: unregister the chip; devm frees the state. */
static int twl_pwm_remove(struct platform_device *pdev)
{
	struct twl_pwm_chip *twl = platform_get_drvdata(pdev);

	return pwmchip_remove(&twl->chip);
}
336
#ifdef CONFIG_OF
/* DT match table; const — it is never modified and of_match_table takes
 * a const pointer. */
static const struct of_device_id twl_pwm_of_match[] = {
	{ .compatible = "ti,twl4030-pwm" },
	{ .compatible = "ti,twl6030-pwm" },
	{ },
};
MODULE_DEVICE_TABLE(of, twl_pwm_of_match);
#endif
345
/* Platform-driver glue; of_match_ptr() compiles to NULL without CONFIG_OF */
static struct platform_driver twl_pwm_driver = {
	.driver = {
		.name = "twl-pwm",
		.of_match_table = of_match_ptr(twl_pwm_of_match),
	},
	.probe = twl_pwm_probe,
	.remove = twl_pwm_remove,
};
module_platform_driver(twl_pwm_driver);

MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
MODULE_DESCRIPTION("PWM driver for TWL4030 and TWL6030");
MODULE_ALIAS("platform:twl-pwm");
MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-twl6030.c b/drivers/pwm/pwm-twl6030.c
deleted file mode 100644
index 378a7e286366..000000000000
--- a/drivers/pwm/pwm-twl6030.c
+++ /dev/null
@@ -1,184 +0,0 @@
1/*
2 * twl6030_pwm.c
3 * Driver for PHOENIX (TWL6030) Pulse Width Modulator
4 *
5 * Copyright (C) 2010 Texas Instruments
6 * Author: Hemanth V <hemanthv@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <linux/pwm.h>
24#include <linux/i2c/twl.h>
25#include <linux/slab.h>
26
27#define LED_PWM_CTRL1 0xF4
28#define LED_PWM_CTRL2 0xF5
29
30/* Max value for CTRL1 register */
31#define PWM_CTRL1_MAX 255
32
33/* Pull down disable */
34#define PWM_CTRL2_DIS_PD (1 << 6)
35
36/* Current control 2.5 milli Amps */
37#define PWM_CTRL2_CURR_02 (2 << 4)
38
39/* LED supply source */
40#define PWM_CTRL2_SRC_VAC (1 << 2)
41
42/* LED modes */
43#define PWM_CTRL2_MODE_HW (0 << 0)
44#define PWM_CTRL2_MODE_SW (1 << 0)
45#define PWM_CTRL2_MODE_DIS (2 << 0)
46
47#define PWM_CTRL2_MODE_MASK 0x3
48
49struct twl6030_pwm_chip {
50 struct pwm_chip chip;
51};
52
53static int twl6030_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
54{
55 int ret;
56 u8 val;
57
58 /* Configure PWM */
59 val = PWM_CTRL2_DIS_PD | PWM_CTRL2_CURR_02 | PWM_CTRL2_SRC_VAC |
60 PWM_CTRL2_MODE_HW;
61
62 ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, LED_PWM_CTRL2);
63 if (ret < 0) {
64 dev_err(chip->dev, "%s: Failed to configure PWM, Error %d\n",
65 pwm->label, ret);
66 return ret;
67 }
68
69 return 0;
70}
71
72static int twl6030_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
73 int duty_ns, int period_ns)
74{
75 u8 duty_cycle = (duty_ns * PWM_CTRL1_MAX) / period_ns;
76 int ret;
77
78 ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, duty_cycle, LED_PWM_CTRL1);
79 if (ret < 0) {
80 pr_err("%s: Failed to configure PWM, Error %d\n",
81 pwm->label, ret);
82 return ret;
83 }
84
85 return 0;
86}
87
88static int twl6030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
89{
90 int ret;
91 u8 val;
92
93 ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, LED_PWM_CTRL2);
94 if (ret < 0) {
95 dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
96 pwm->label, ret);
97 return ret;
98 }
99
100 /* Change mode to software control */
101 val &= ~PWM_CTRL2_MODE_MASK;
102 val |= PWM_CTRL2_MODE_SW;
103
104 ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, LED_PWM_CTRL2);
105 if (ret < 0) {
106 dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
107 pwm->label, ret);
108 return ret;
109 }
110
111 twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, LED_PWM_CTRL2);
112 return 0;
113}
114
115static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
116{
117 int ret;
118 u8 val;
119
120 ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, LED_PWM_CTRL2);
121 if (ret < 0) {
122 dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
123 pwm->label, ret);
124 return;
125 }
126
127 val &= ~PWM_CTRL2_MODE_MASK;
128 val |= PWM_CTRL2_MODE_HW;
129
130 ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, LED_PWM_CTRL2);
131 if (ret < 0) {
132 dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
133 pwm->label, ret);
134 }
135}
136
137static const struct pwm_ops twl6030_pwm_ops = {
138 .request = twl6030_pwm_request,
139 .config = twl6030_pwm_config,
140 .enable = twl6030_pwm_enable,
141 .disable = twl6030_pwm_disable,
142};
143
144static int twl6030_pwm_probe(struct platform_device *pdev)
145{
146 struct twl6030_pwm_chip *twl6030;
147 int ret;
148
149 twl6030 = devm_kzalloc(&pdev->dev, sizeof(*twl6030), GFP_KERNEL);
150 if (!twl6030)
151 return -ENOMEM;
152
153 twl6030->chip.dev = &pdev->dev;
154 twl6030->chip.ops = &twl6030_pwm_ops;
155 twl6030->chip.base = -1;
156 twl6030->chip.npwm = 1;
157
158 ret = pwmchip_add(&twl6030->chip);
159 if (ret < 0)
160 return ret;
161
162 platform_set_drvdata(pdev, twl6030);
163
164 return 0;
165}
166
167static int twl6030_pwm_remove(struct platform_device *pdev)
168{
169 struct twl6030_pwm_chip *twl6030 = platform_get_drvdata(pdev);
170
171 return pwmchip_remove(&twl6030->chip);
172}
173
174static struct platform_driver twl6030_pwm_driver = {
175 .driver = {
176 .name = "twl6030-pwm",
177 },
178 .probe = twl6030_pwm_probe,
179 .remove = twl6030_pwm_remove,
180};
181module_platform_driver(twl6030_pwm_driver);
182
183MODULE_ALIAS("platform:twl6030-pwm");
184MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index ad14389b7144..b0ba2d403439 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * drivers/pwm/pwm-vt8500.c 2 * drivers/pwm/pwm-vt8500.c
3 * 3 *
4 * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com> 4 * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
5 * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
5 * 6 *
6 * This software is licensed under the terms of the GNU General Public 7 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and 8 * License version 2, as published by the Free Software Foundation, and
@@ -21,14 +22,24 @@
21#include <linux/io.h> 22#include <linux/io.h>
22#include <linux/pwm.h> 23#include <linux/pwm.h>
23#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/clk.h>
24 26
25#include <asm/div64.h> 27#include <asm/div64.h>
26 28
27#define VT8500_NR_PWMS 4 29#include <linux/of.h>
30#include <linux/of_device.h>
31#include <linux/of_address.h>
32
33/*
34 * SoC architecture allocates register space for 4 PWMs but only
35 * 2 are currently implemented.
36 */
37#define VT8500_NR_PWMS 2
28 38
29struct vt8500_chip { 39struct vt8500_chip {
30 struct pwm_chip chip; 40 struct pwm_chip chip;
31 void __iomem *base; 41 void __iomem *base;
42 struct clk *clk;
32}; 43};
33 44
34#define to_vt8500_chip(chip) container_of(chip, struct vt8500_chip, chip) 45#define to_vt8500_chip(chip) container_of(chip, struct vt8500_chip, chip)
@@ -51,8 +62,15 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
51 struct vt8500_chip *vt8500 = to_vt8500_chip(chip); 62 struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
52 unsigned long long c; 63 unsigned long long c;
53 unsigned long period_cycles, prescale, pv, dc; 64 unsigned long period_cycles, prescale, pv, dc;
65 int err;
54 66
55 c = 25000000/2; /* wild guess --- need to implement clocks */ 67 err = clk_enable(vt8500->clk);
68 if (err < 0) {
69 dev_err(chip->dev, "failed to enable clock\n");
70 return err;
71 }
72
73 c = clk_get_rate(vt8500->clk);
56 c = c * period_ns; 74 c = c * period_ns;
57 do_div(c, 1000000000); 75 do_div(c, 1000000000);
58 period_cycles = c; 76 period_cycles = c;
@@ -64,8 +82,10 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
64 if (pv > 4095) 82 if (pv > 4095)
65 pv = 4095; 83 pv = 4095;
66 84
67 if (prescale > 1023) 85 if (prescale > 1023) {
86 clk_disable(vt8500->clk);
68 return -EINVAL; 87 return -EINVAL;
88 }
69 89
70 c = (unsigned long long)pv * duty_ns; 90 c = (unsigned long long)pv * duty_ns;
71 do_div(c, period_ns); 91 do_div(c, period_ns);
@@ -80,13 +100,21 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
80 pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 3)); 100 pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 3));
81 writel(dc, vt8500->base + 0xc + (pwm->hwpwm << 4)); 101 writel(dc, vt8500->base + 0xc + (pwm->hwpwm << 4));
82 102
103 clk_disable(vt8500->clk);
83 return 0; 104 return 0;
84} 105}
85 106
86static int vt8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) 107static int vt8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
87{ 108{
109 int err;
88 struct vt8500_chip *vt8500 = to_vt8500_chip(chip); 110 struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
89 111
112 err = clk_enable(vt8500->clk);
113 if (err < 0) {
114 dev_err(chip->dev, "failed to enable clock\n");
115 return err;
116 }
117
90 pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0)); 118 pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0));
91 writel(5, vt8500->base + (pwm->hwpwm << 4)); 119 writel(5, vt8500->base + (pwm->hwpwm << 4));
92 return 0; 120 return 0;
@@ -98,6 +126,8 @@ static void vt8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
98 126
99 pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0)); 127 pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0));
100 writel(0, vt8500->base + (pwm->hwpwm << 4)); 128 writel(0, vt8500->base + (pwm->hwpwm << 4));
129
130 clk_disable(vt8500->clk);
101} 131}
102 132
103static struct pwm_ops vt8500_pwm_ops = { 133static struct pwm_ops vt8500_pwm_ops = {
@@ -107,12 +137,24 @@ static struct pwm_ops vt8500_pwm_ops = {
107 .owner = THIS_MODULE, 137 .owner = THIS_MODULE,
108}; 138};
109 139
110static int __devinit pwm_probe(struct platform_device *pdev) 140static const struct of_device_id vt8500_pwm_dt_ids[] = {
141 { .compatible = "via,vt8500-pwm", },
142 { /* Sentinel */ }
143};
144MODULE_DEVICE_TABLE(of, vt8500_pwm_dt_ids);
145
146static int vt8500_pwm_probe(struct platform_device *pdev)
111{ 147{
112 struct vt8500_chip *chip; 148 struct vt8500_chip *chip;
113 struct resource *r; 149 struct resource *r;
150 struct device_node *np = pdev->dev.of_node;
114 int ret; 151 int ret;
115 152
153 if (!np) {
154 dev_err(&pdev->dev, "invalid devicetree node\n");
155 return -EINVAL;
156 }
157
116 chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); 158 chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
117 if (chip == NULL) { 159 if (chip == NULL) {
118 dev_err(&pdev->dev, "failed to allocate memory\n"); 160 dev_err(&pdev->dev, "failed to allocate memory\n");
@@ -124,6 +166,12 @@ static int __devinit pwm_probe(struct platform_device *pdev)
124 chip->chip.base = -1; 166 chip->chip.base = -1;
125 chip->chip.npwm = VT8500_NR_PWMS; 167 chip->chip.npwm = VT8500_NR_PWMS;
126 168
169 chip->clk = devm_clk_get(&pdev->dev, NULL);
170 if (IS_ERR(chip->clk)) {
171 dev_err(&pdev->dev, "clock source not specified\n");
172 return PTR_ERR(chip->clk);
173 }
174
127 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 175 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
128 if (r == NULL) { 176 if (r == NULL) {
129 dev_err(&pdev->dev, "no memory resource defined\n"); 177 dev_err(&pdev->dev, "no memory resource defined\n");
@@ -131,18 +179,26 @@ static int __devinit pwm_probe(struct platform_device *pdev)
131 } 179 }
132 180
133 chip->base = devm_request_and_ioremap(&pdev->dev, r); 181 chip->base = devm_request_and_ioremap(&pdev->dev, r);
134 if (chip->base == NULL) 182 if (!chip->base)
135 return -EADDRNOTAVAIL; 183 return -EADDRNOTAVAIL;
136 184
185 ret = clk_prepare(chip->clk);
186 if (ret < 0) {
187 dev_err(&pdev->dev, "failed to prepare clock\n");
188 return ret;
189 }
190
137 ret = pwmchip_add(&chip->chip); 191 ret = pwmchip_add(&chip->chip);
138 if (ret < 0) 192 if (ret < 0) {
193 dev_err(&pdev->dev, "failed to add PWM chip\n");
139 return ret; 194 return ret;
195 }
140 196
141 platform_set_drvdata(pdev, chip); 197 platform_set_drvdata(pdev, chip);
142 return ret; 198 return ret;
143} 199}
144 200
145static int __devexit pwm_remove(struct platform_device *pdev) 201static int vt8500_pwm_remove(struct platform_device *pdev)
146{ 202{
147 struct vt8500_chip *chip; 203 struct vt8500_chip *chip;
148 204
@@ -150,28 +206,22 @@ static int __devexit pwm_remove(struct platform_device *pdev)
150 if (chip == NULL) 206 if (chip == NULL)
151 return -ENODEV; 207 return -ENODEV;
152 208
209 clk_unprepare(chip->clk);
210
153 return pwmchip_remove(&chip->chip); 211 return pwmchip_remove(&chip->chip);
154} 212}
155 213
156static struct platform_driver pwm_driver = { 214static struct platform_driver vt8500_pwm_driver = {
215 .probe = vt8500_pwm_probe,
216 .remove = vt8500_pwm_remove,
157 .driver = { 217 .driver = {
158 .name = "vt8500-pwm", 218 .name = "vt8500-pwm",
159 .owner = THIS_MODULE, 219 .owner = THIS_MODULE,
220 .of_match_table = vt8500_pwm_dt_ids,
160 }, 221 },
161 .probe = pwm_probe,
162 .remove = __devexit_p(pwm_remove),
163}; 222};
223module_platform_driver(vt8500_pwm_driver);
164 224
165static int __init pwm_init(void) 225MODULE_DESCRIPTION("VT8500 PWM Driver");
166{ 226MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
167 return platform_driver_register(&pwm_driver); 227MODULE_LICENSE("GPL v2");
168}
169arch_initcall(pwm_init);
170
171static void __exit pwm_exit(void)
172{
173 platform_driver_unregister(&pwm_driver);
174}
175module_exit(pwm_exit);
176
177MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 75c0c4f5fdf2..ab34497bcfee 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -20,6 +20,7 @@
20#include <linux/spi/spi.h> 20#include <linux/spi/spi.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/platform_data/atmel.h> 22#include <linux/platform_data/atmel.h>
23#include <linux/of.h>
23 24
24#include <asm/io.h> 25#include <asm/io.h>
25#include <asm/gpio.h> 26#include <asm/gpio.h>
@@ -768,6 +769,10 @@ static int atmel_spi_setup(struct spi_device *spi)
768 769
769 /* chipselect must have been muxed as GPIO (e.g. in board setup) */ 770 /* chipselect must have been muxed as GPIO (e.g. in board setup) */
770 npcs_pin = (unsigned int)spi->controller_data; 771 npcs_pin = (unsigned int)spi->controller_data;
772
773 if (gpio_is_valid(spi->cs_gpio))
774 npcs_pin = spi->cs_gpio;
775
771 asd = spi->controller_state; 776 asd = spi->controller_state;
772 if (!asd) { 777 if (!asd) {
773 asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL); 778 asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
@@ -937,8 +942,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
937 /* the spi->mode bits understood by this driver: */ 942 /* the spi->mode bits understood by this driver: */
938 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 943 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
939 944
945 master->dev.of_node = pdev->dev.of_node;
940 master->bus_num = pdev->id; 946 master->bus_num = pdev->id;
941 master->num_chipselect = 4; 947 master->num_chipselect = master->dev.of_node ? 0 : 4;
942 master->setup = atmel_spi_setup; 948 master->setup = atmel_spi_setup;
943 master->transfer = atmel_spi_transfer; 949 master->transfer = atmel_spi_transfer;
944 master->cleanup = atmel_spi_cleanup; 950 master->cleanup = atmel_spi_cleanup;
@@ -1064,11 +1070,20 @@ static int atmel_spi_resume(struct platform_device *pdev)
1064#define atmel_spi_resume NULL 1070#define atmel_spi_resume NULL
1065#endif 1071#endif
1066 1072
1073#if defined(CONFIG_OF)
1074static const struct of_device_id atmel_spi_dt_ids[] = {
1075 { .compatible = "atmel,at91rm9200-spi" },
1076 { /* sentinel */ }
1077};
1078
1079MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
1080#endif
1067 1081
1068static struct platform_driver atmel_spi_driver = { 1082static struct platform_driver atmel_spi_driver = {
1069 .driver = { 1083 .driver = {
1070 .name = "atmel_spi", 1084 .name = "atmel_spi",
1071 .owner = THIS_MODULE, 1085 .owner = THIS_MODULE,
1086 .of_match_table = of_match_ptr(atmel_spi_dt_ids),
1072 }, 1087 },
1073 .suspend = atmel_spi_suspend, 1088 .suspend = atmel_spi_suspend,
1074 .resume = atmel_spi_resume, 1089 .resume = atmel_spi_resume,
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 4dd7b7ce5c5a..ad93231a8038 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -215,6 +215,10 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
215 writel(0, regs + S3C64XX_SPI_PACKET_CNT); 215 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
216 216
217 val = readl(regs + S3C64XX_SPI_CH_CFG); 217 val = readl(regs + S3C64XX_SPI_CH_CFG);
218 val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
219 writel(val, regs + S3C64XX_SPI_CH_CFG);
220
221 val = readl(regs + S3C64XX_SPI_CH_CFG);
218 val |= S3C64XX_SPI_CH_SW_RST; 222 val |= S3C64XX_SPI_CH_SW_RST;
219 val &= ~S3C64XX_SPI_CH_HS_EN; 223 val &= ~S3C64XX_SPI_CH_HS_EN;
220 writel(val, regs + S3C64XX_SPI_CH_CFG); 224 writel(val, regs + S3C64XX_SPI_CH_CFG);
@@ -248,10 +252,6 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
248 val = readl(regs + S3C64XX_SPI_MODE_CFG); 252 val = readl(regs + S3C64XX_SPI_MODE_CFG);
249 val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); 253 val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
250 writel(val, regs + S3C64XX_SPI_MODE_CFG); 254 writel(val, regs + S3C64XX_SPI_MODE_CFG);
251
252 val = readl(regs + S3C64XX_SPI_CH_CFG);
253 val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
254 writel(val, regs + S3C64XX_SPI_CH_CFG);
255} 255}
256 256
257static void s3c64xx_spi_dmacb(void *data) 257static void s3c64xx_spi_dmacb(void *data)
@@ -771,8 +771,6 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
771 if (list_is_last(&xfer->transfer_list, 771 if (list_is_last(&xfer->transfer_list,
772 &msg->transfers)) 772 &msg->transfers))
773 cs_toggle = 1; 773 cs_toggle = 1;
774 else
775 disable_cs(sdd, spi);
776 } 774 }
777 775
778 msg->actual_length += xfer->len; 776 msg->actual_length += xfer->len;
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 32f7b55fce09..60cfae51c713 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -290,7 +290,7 @@ static int hspi_probe(struct platform_device *pdev)
290 } 290 }
291 291
292 clk = clk_get(NULL, "shyway_clk"); 292 clk = clk_get(NULL, "shyway_clk");
293 if (!clk) { 293 if (IS_ERR(clk)) {
294 dev_err(&pdev->dev, "shyway_clk is required\n"); 294 dev_err(&pdev->dev, "shyway_clk is required\n");
295 ret = -EINVAL; 295 ret = -EINVAL;
296 goto error0; 296 goto error0;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ab095acdb2a8..19ee901577da 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -824,6 +824,7 @@ static void of_register_spi_devices(struct spi_master *master)
824 struct spi_device *spi; 824 struct spi_device *spi;
825 struct device_node *nc; 825 struct device_node *nc;
826 const __be32 *prop; 826 const __be32 *prop;
827 char modalias[SPI_NAME_SIZE + 4];
827 int rc; 828 int rc;
828 int len; 829 int len;
829 830
@@ -887,7 +888,9 @@ static void of_register_spi_devices(struct spi_master *master)
887 spi->dev.of_node = nc; 888 spi->dev.of_node = nc;
888 889
889 /* Register the new device */ 890 /* Register the new device */
890 request_module(spi->modalias); 891 snprintf(modalias, sizeof(modalias), "%s%s", SPI_MODULE_PREFIX,
892 spi->modalias);
893 request_module(modalias);
891 rc = spi_add_device(spi); 894 rc = spi_add_device(spi);
892 if (rc) { 895 if (rc) {
893 dev_err(&master->dev, "spi_device register error %s\n", 896 dev_err(&master->dev, "spi_device register error %s\n",
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 3a6d5419e3e3..146fea8aa431 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -107,7 +107,6 @@ void locomolcd_power(int on)
107} 107}
108EXPORT_SYMBOL(locomolcd_power); 108EXPORT_SYMBOL(locomolcd_power);
109 109
110
111static int current_intensity; 110static int current_intensity;
112 111
113static int locomolcd_set_intensity(struct backlight_device *bd) 112static int locomolcd_set_intensity(struct backlight_device *bd)
@@ -122,13 +121,25 @@ static int locomolcd_set_intensity(struct backlight_device *bd)
122 intensity = 0; 121 intensity = 0;
123 122
124 switch (intensity) { 123 switch (intensity) {
125 /* AC and non-AC are handled differently, but produce same results in sharp code? */ 124 /*
126 case 0: locomo_frontlight_set(locomolcd_dev, 0, 0, 161); break; 125 * AC and non-AC are handled differently,
127 case 1: locomo_frontlight_set(locomolcd_dev, 117, 0, 161); break; 126 * but produce same results in sharp code?
128 case 2: locomo_frontlight_set(locomolcd_dev, 163, 0, 148); break; 127 */
129 case 3: locomo_frontlight_set(locomolcd_dev, 194, 0, 161); break; 128 case 0:
130 case 4: locomo_frontlight_set(locomolcd_dev, 194, 1, 161); break; 129 locomo_frontlight_set(locomolcd_dev, 0, 0, 161);
131 130 break;
131 case 1:
132 locomo_frontlight_set(locomolcd_dev, 117, 0, 161);
133 break;
134 case 2:
135 locomo_frontlight_set(locomolcd_dev, 163, 0, 148);
136 break;
137 case 3:
138 locomo_frontlight_set(locomolcd_dev, 194, 0, 161);
139 break;
140 case 4:
141 locomo_frontlight_set(locomolcd_dev, 194, 1, 161);
142 break;
132 default: 143 default:
133 return -ENODEV; 144 return -ENODEV;
134 } 145 }
@@ -175,9 +186,11 @@ static int locomolcd_probe(struct locomo_dev *ldev)
175 186
176 locomo_gpio_set_dir(ldev->dev.parent, LOCOMO_GPIO_FL_VR, 0); 187 locomo_gpio_set_dir(ldev->dev.parent, LOCOMO_GPIO_FL_VR, 0);
177 188
178 /* the poodle_lcd_power function is called for the first time 189 /*
190 * the poodle_lcd_power function is called for the first time
179 * from fs_initcall, which is before locomo is activated. 191 * from fs_initcall, which is before locomo is activated.
180 * We need to recall poodle_lcd_power here*/ 192 * We need to recall poodle_lcd_power here
193 */
181 if (machine_is_poodle()) 194 if (machine_is_poodle())
182 locomolcd_power(1); 195 locomolcd_power(1);
183 196
@@ -190,8 +203,8 @@ static int locomolcd_probe(struct locomo_dev *ldev)
190 &ldev->dev, NULL, 203 &ldev->dev, NULL,
191 &locomobl_data, &props); 204 &locomobl_data, &props);
192 205
193 if (IS_ERR (locomolcd_bl_device)) 206 if (IS_ERR(locomolcd_bl_device))
194 return PTR_ERR (locomolcd_bl_device); 207 return PTR_ERR(locomolcd_bl_device);
195 208
196 /* Set up frontlight so that screen is readable */ 209 /* Set up frontlight so that screen is readable */
197 locomolcd_bl_device->props.brightness = 2; 210 locomolcd_bl_device->props.brightness = 2;
@@ -226,7 +239,6 @@ static struct locomo_driver poodle_lcd_driver = {
226 .resume = locomolcd_resume, 239 .resume = locomolcd_resume,
227}; 240};
228 241
229
230static int __init locomolcd_init(void) 242static int __init locomolcd_init(void)
231{ 243{
232 return locomo_driver_register(&poodle_lcd_driver); 244 return locomo_driver_register(&poodle_lcd_driver);
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 9349bb37a2fe..ca3ab3f9ca70 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -56,13 +56,15 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
56 struct ceph_nfs_confh *cfh = (void *)rawfh; 56 struct ceph_nfs_confh *cfh = (void *)rawfh;
57 int connected_handle_length = sizeof(*cfh)/4; 57 int connected_handle_length = sizeof(*cfh)/4;
58 int handle_length = sizeof(*fh)/4; 58 int handle_length = sizeof(*fh)/4;
59 struct dentry *dentry = d_find_alias(inode); 59 struct dentry *dentry;
60 struct dentry *parent; 60 struct dentry *parent;
61 61
62 /* don't re-export snaps */ 62 /* don't re-export snaps */
63 if (ceph_snap(inode) != CEPH_NOSNAP) 63 if (ceph_snap(inode) != CEPH_NOSNAP)
64 return -EINVAL; 64 return -EINVAL;
65 65
66 dentry = d_find_alias(inode);
67
66 /* if we found an alias, generate a connectable fh */ 68 /* if we found an alias, generate a connectable fh */
67 if (*max_len >= connected_handle_length && dentry) { 69 if (*max_len >= connected_handle_length && dentry) {
68 dout("encode_fh %p connectable\n", dentry); 70 dout("encode_fh %p connectable\n", dentry);
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 0c96eb52c797..03310721712f 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -417,14 +417,16 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
417 spin_unlock(&c->erase_completion_lock); 417 spin_unlock(&c->erase_completion_lock);
418 418
419 ret = jffs2_prealloc_raw_node_refs(c, jeb, 1); 419 ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
420 if (ret) 420
421 return ret;
422 /* Just lock it again and continue. Nothing much can change because 421 /* Just lock it again and continue. Nothing much can change because
423 we hold c->alloc_sem anyway. In fact, it's not entirely clear why 422 we hold c->alloc_sem anyway. In fact, it's not entirely clear why
424 we hold c->erase_completion_lock in the majority of this function... 423 we hold c->erase_completion_lock in the majority of this function...
425 but that's a question for another (more caffeine-rich) day. */ 424 but that's a question for another (more caffeine-rich) day. */
426 spin_lock(&c->erase_completion_lock); 425 spin_lock(&c->erase_completion_lock);
427 426
427 if (ret)
428 return ret;
429
428 waste = jeb->free_size; 430 waste = jeb->free_size;
429 jffs2_link_node_ref(c, jeb, 431 jffs2_link_node_ref(c, jeb,
430 (jeb->offset + c->sector_size - waste) | REF_OBSOLETE, 432 (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
diff --git a/include/linux/asn1.h b/include/linux/asn1.h
index 5c3f4e4b9a23..eed6982860ba 100644
--- a/include/linux/asn1.h
+++ b/include/linux/asn1.h
@@ -64,4 +64,6 @@ enum asn1_tag {
64 ASN1_LONG_TAG = 31 /* Long form tag */ 64 ASN1_LONG_TAG = 31 /* Long form tag */
65}; 65};
66 66
67#define ASN1_INDEFINITE_LENGTH 0x80
68
67#endif /* _LINUX_ASN1_H */ 69#endif /* _LINUX_ASN1_H */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 93b1e091b1e9..e0ce311011c0 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -350,6 +350,7 @@ extern void bcma_core_set_clockmode(struct bcma_device *core,
350 enum bcma_clkmode clkmode); 350 enum bcma_clkmode clkmode);
351extern void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, 351extern void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status,
352 bool on); 352 bool on);
353extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset);
353#define BCMA_DMA_TRANSLATION_MASK 0xC0000000 354#define BCMA_DMA_TRANSLATION_MASK 0xC0000000
354#define BCMA_DMA_TRANSLATION_NONE 0x00000000 355#define BCMA_DMA_TRANSLATION_NONE 0x00000000
355#define BCMA_DMA_TRANSLATION_DMA32_CMT 0x40000000 /* Client Mode Translation for 32-bit DMA */ 356#define BCMA_DMA_TRANSLATION_DMA32_CMT 0x40000000 /* Client Mode Translation for 32-bit DMA */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index acb4f7bbbd32..f94bc83011ed 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1188,14 +1188,25 @@ static inline int queue_discard_alignment(struct request_queue *q)
1188 1188
1189static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) 1189static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
1190{ 1190{
1191 sector_t alignment = sector << 9; 1191 unsigned int alignment, granularity, offset;
1192 alignment = sector_div(alignment, lim->discard_granularity);
1193 1192
1194 if (!lim->max_discard_sectors) 1193 if (!lim->max_discard_sectors)
1195 return 0; 1194 return 0;
1196 1195
1197 alignment = lim->discard_granularity + lim->discard_alignment - alignment; 1196 /* Why are these in bytes, not sectors? */
1198 return sector_div(alignment, lim->discard_granularity); 1197 alignment = lim->discard_alignment >> 9;
1198 granularity = lim->discard_granularity >> 9;
1199 if (!granularity)
1200 return 0;
1201
1202 /* Offset of the partition start in 'granularity' sectors */
1203 offset = sector_div(sector, granularity);
1204
1205 /* And why do we do this modulus *again* in blkdev_issue_discard()? */
1206 offset = (granularity + alignment - offset) % granularity;
1207
1208 /* Turn it back into bytes, gaah */
1209 return offset << 9;
1199} 1210}
1200 1211
1201static inline int bdev_discard_alignment(struct block_device *bdev) 1212static inline int bdev_discard_alignment(struct block_device *bdev)
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 412bc6c2b023..662fd1b4c42a 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -31,6 +31,8 @@
31 31
32#define __linktime_error(message) __attribute__((__error__(message))) 32#define __linktime_error(message) __attribute__((__error__(message)))
33 33
34#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
35
34#if __GNUC_MINOR__ >= 5 36#if __GNUC_MINOR__ >= 5
35/* 37/*
36 * Mark a position in code as unreachable. This can be used to 38 * Mark a position in code as unreachable. This can be used to
@@ -63,3 +65,13 @@
63#define __compiletime_warning(message) __attribute__((warning(message))) 65#define __compiletime_warning(message) __attribute__((warning(message)))
64#define __compiletime_error(message) __attribute__((error(message))) 66#define __compiletime_error(message) __attribute__((error(message)))
65#endif 67#endif
68
69#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
70#if __GNUC_MINOR__ >= 4
71#define __HAVE_BUILTIN_BSWAP32__
72#define __HAVE_BUILTIN_BSWAP64__
73#endif
74#if __GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6)
75#define __HAVE_BUILTIN_BSWAP16__
76#endif
77#endif
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index d8e636e5607d..973ce10c40b6 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -29,3 +29,10 @@
29#endif 29#endif
30 30
31#define uninitialized_var(x) x 31#define uninitialized_var(x) x
32
33#ifndef __HAVE_BUILTIN_BSWAP16__
34/* icc has this, but it's called _bswap16 */
35#define __HAVE_BUILTIN_BSWAP16__
36#define __builtin_bswap16 _bswap16
37#endif
38
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index b121554f1fe2..dd852b73b286 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -44,6 +44,10 @@ extern void __chk_io_ptr(const volatile void __iomem *);
44# define __rcu 44# define __rcu
45#endif 45#endif
46 46
47/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
48#define ___PASTE(a,b) a##b
49#define __PASTE(a,b) ___PASTE(a,b)
50
47#ifdef __KERNEL__ 51#ifdef __KERNEL__
48 52
49#ifdef __GNUC__ 53#ifdef __GNUC__
@@ -166,6 +170,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
166 (typeof(ptr)) (__ptr + (off)); }) 170 (typeof(ptr)) (__ptr + (off)); })
167#endif 171#endif
168 172
173/* Not-quite-unique ID. */
174#ifndef __UNIQUE_ID
175# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
176#endif
177
169#endif /* __KERNEL__ */ 178#endif /* __KERNEL__ */
170 179
171#endif /* __ASSEMBLY__ */ 180#endif /* __ASSEMBLY__ */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f74856e17e48..0f615eb23d05 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -30,6 +30,7 @@ struct vm_area_struct;
30#define ___GFP_HARDWALL 0x20000u 30#define ___GFP_HARDWALL 0x20000u
31#define ___GFP_THISNODE 0x40000u 31#define ___GFP_THISNODE 0x40000u
32#define ___GFP_RECLAIMABLE 0x80000u 32#define ___GFP_RECLAIMABLE 0x80000u
33#define ___GFP_KMEMCG 0x100000u
33#define ___GFP_NOTRACK 0x200000u 34#define ___GFP_NOTRACK 0x200000u
34#define ___GFP_NO_KSWAPD 0x400000u 35#define ___GFP_NO_KSWAPD 0x400000u
35#define ___GFP_OTHER_NODE 0x800000u 36#define ___GFP_OTHER_NODE 0x800000u
@@ -89,6 +90,7 @@ struct vm_area_struct;
89 90
90#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD) 91#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
91#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */ 92#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
93#define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
92#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ 94#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
93 95
94/* 96/*
@@ -365,6 +367,9 @@ extern void free_pages(unsigned long addr, unsigned int order);
365extern void free_hot_cold_page(struct page *page, int cold); 367extern void free_hot_cold_page(struct page *page, int cold);
366extern void free_hot_cold_page_list(struct list_head *list, int cold); 368extern void free_hot_cold_page_list(struct list_head *list, int cold);
367 369
370extern void __free_memcg_kmem_pages(struct page *page, unsigned int order);
371extern void free_memcg_kmem_pages(unsigned long addr, unsigned int order);
372
368#define __free_page(page) __free_pages((page), 0) 373#define __free_page(page) __free_pages((page), 0)
369#define free_page(addr) free_pages((addr), 0) 374#define free_page(addr) free_pages((addr), 0)
370 375
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index d73878c694b3..ce8217f7b5c2 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -62,7 +62,7 @@ extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
62 struct page *page); 62 struct page *page);
63extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, 63extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
64 struct hugetlb_cgroup *h_cg); 64 struct hugetlb_cgroup *h_cg);
65extern int hugetlb_cgroup_file_init(int idx) __init; 65extern void hugetlb_cgroup_file_init(void) __init;
66extern void hugetlb_cgroup_migrate(struct page *oldhpage, 66extern void hugetlb_cgroup_migrate(struct page *oldhpage,
67 struct page *newhpage); 67 struct page *newhpage);
68 68
@@ -111,9 +111,8 @@ hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
111 return; 111 return;
112} 112}
113 113
114static inline int __init hugetlb_cgroup_file_init(int idx) 114static inline void hugetlb_cgroup_file_init(void)
115{ 115{
116 return 0;
117} 116}
118 117
119static inline void hugetlb_cgroup_migrate(struct page *oldhpage, 118static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h
index 92a0dc75bc74..babe0cf6d56b 100644
--- a/include/linux/i2c-omap.h
+++ b/include/linux/i2c-omap.h
@@ -20,8 +20,6 @@
20#define OMAP_I2C_FLAG_NO_FIFO BIT(0) 20#define OMAP_I2C_FLAG_NO_FIFO BIT(0)
21#define OMAP_I2C_FLAG_SIMPLE_CLOCK BIT(1) 21#define OMAP_I2C_FLAG_SIMPLE_CLOCK BIT(1)
22#define OMAP_I2C_FLAG_16BIT_DATA_REG BIT(2) 22#define OMAP_I2C_FLAG_16BIT_DATA_REG BIT(2)
23#define OMAP_I2C_FLAG_RESET_REGS_POSTIDLE BIT(3)
24#define OMAP_I2C_FLAG_APPLY_ERRATA_I207 BIT(4)
25#define OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK BIT(5) 23#define OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK BIT(5)
26#define OMAP_I2C_FLAG_FORCE_19200_INT_CLK BIT(6) 24#define OMAP_I2C_FLAG_FORCE_19200_INT_CLK BIT(6)
27/* how the CPU address bus must be translated for I2C unit access */ 25/* how the CPU address bus must be translated for I2C unit access */
diff --git a/include/linux/i2c/i2c-sh_mobile.h b/include/linux/i2c/i2c-sh_mobile.h
index beda7081aead..06e3089795fb 100644
--- a/include/linux/i2c/i2c-sh_mobile.h
+++ b/include/linux/i2c/i2c-sh_mobile.h
@@ -5,6 +5,7 @@
5 5
6struct i2c_sh_mobile_platform_data { 6struct i2c_sh_mobile_platform_data {
7 unsigned long bus_speed; 7 unsigned long bus_speed;
8 unsigned int clks_per_count;
8}; 9};
9 10
10#endif /* __I2C_SH_MOBILE_H__ */ 11#endif /* __I2C_SH_MOBILE_H__ */
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 2c7223d7e73b..86c361e947b9 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -18,6 +18,7 @@ extern int ima_bprm_check(struct linux_binprm *bprm);
18extern int ima_file_check(struct file *file, int mask); 18extern int ima_file_check(struct file *file, int mask);
19extern void ima_file_free(struct file *file); 19extern void ima_file_free(struct file *file);
20extern int ima_file_mmap(struct file *file, unsigned long prot); 20extern int ima_file_mmap(struct file *file, unsigned long prot);
21extern int ima_module_check(struct file *file);
21 22
22#else 23#else
23static inline int ima_bprm_check(struct linux_binprm *bprm) 24static inline int ima_bprm_check(struct linux_binprm *bprm)
@@ -40,6 +41,11 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
40 return 0; 41 return 0;
41} 42}
42 43
44static inline int ima_module_check(struct file *file)
45{
46 return 0;
47}
48
43#endif /* CONFIG_IMA_H */ 49#endif /* CONFIG_IMA_H */
44 50
45#ifdef CONFIG_IMA_APPRAISE 51#ifdef CONFIG_IMA_APPRAISE
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e98a74c0c9c0..0108a56f814e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -21,11 +21,14 @@
21#define _LINUX_MEMCONTROL_H 21#define _LINUX_MEMCONTROL_H
22#include <linux/cgroup.h> 22#include <linux/cgroup.h>
23#include <linux/vm_event_item.h> 23#include <linux/vm_event_item.h>
24#include <linux/hardirq.h>
25#include <linux/jump_label.h>
24 26
25struct mem_cgroup; 27struct mem_cgroup;
26struct page_cgroup; 28struct page_cgroup;
27struct page; 29struct page;
28struct mm_struct; 30struct mm_struct;
31struct kmem_cache;
29 32
30/* Stats that can be updated by kernel. */ 33/* Stats that can be updated by kernel. */
31enum mem_cgroup_page_stat_item { 34enum mem_cgroup_page_stat_item {
@@ -414,5 +417,211 @@ static inline void sock_release_memcg(struct sock *sk)
414{ 417{
415} 418}
416#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */ 419#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */
420
421#ifdef CONFIG_MEMCG_KMEM
422extern struct static_key memcg_kmem_enabled_key;
423
424extern int memcg_limited_groups_array_size;
425
426/*
427 * Helper macro to loop through all memcg-specific caches. Callers must still
428 * check if the cache is valid (it is either valid or NULL).
429 * the slab_mutex must be held when looping through those caches
430 */
431#define for_each_memcg_cache_index(_idx) \
432 for ((_idx) = 0; i < memcg_limited_groups_array_size; (_idx)++)
433
434static inline bool memcg_kmem_enabled(void)
435{
436 return static_key_false(&memcg_kmem_enabled_key);
437}
438
439/*
440 * In general, we'll do everything in our power to not incur in any overhead
441 * for non-memcg users for the kmem functions. Not even a function call, if we
442 * can avoid it.
443 *
444 * Therefore, we'll inline all those functions so that in the best case, we'll
445 * see that kmemcg is off for everybody and proceed quickly. If it is on,
446 * we'll still do most of the flag checking inline. We check a lot of
447 * conditions, but because they are pretty simple, they are expected to be
448 * fast.
449 */
450bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
451 int order);
452void __memcg_kmem_commit_charge(struct page *page,
453 struct mem_cgroup *memcg, int order);
454void __memcg_kmem_uncharge_pages(struct page *page, int order);
455
456int memcg_cache_id(struct mem_cgroup *memcg);
457int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
458 struct kmem_cache *root_cache);
459void memcg_release_cache(struct kmem_cache *cachep);
460void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);
461
462int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
463void memcg_update_array_size(int num_groups);
464
465struct kmem_cache *
466__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
467
468void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
469void kmem_cache_destroy_memcg_children(struct kmem_cache *s);
470
471/**
472 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
473 * @gfp: the gfp allocation flags.
474 * @memcg: a pointer to the memcg this was charged against.
475 * @order: allocation order.
476 *
477 * returns true if the memcg where the current task belongs can hold this
478 * allocation.
479 *
480 * We return true automatically if this allocation is not to be accounted to
481 * any memcg.
482 */
483static inline bool
484memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
485{
486 if (!memcg_kmem_enabled())
487 return true;
488
489 /*
490 * __GFP_NOFAIL allocations will move on even if charging is not
491 * possible. Therefore we don't even try, and have this allocation
492 * unaccounted. We could in theory charge it with
493 * res_counter_charge_nofail, but we hope those allocations are rare,
494 * and won't be worth the trouble.
495 */
496 if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL))
497 return true;
498 if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
499 return true;
500
501 /* If the test is dying, just let it go. */
502 if (unlikely(fatal_signal_pending(current)))
503 return true;
504
505 return __memcg_kmem_newpage_charge(gfp, memcg, order);
506}
507
508/**
509 * memcg_kmem_uncharge_pages: uncharge pages from memcg
510 * @page: pointer to struct page being freed
511 * @order: allocation order.
512 *
513 * there is no need to specify memcg here, since it is embedded in page_cgroup
514 */
515static inline void
516memcg_kmem_uncharge_pages(struct page *page, int order)
517{
518 if (memcg_kmem_enabled())
519 __memcg_kmem_uncharge_pages(page, order);
520}
521
522/**
523 * memcg_kmem_commit_charge: embeds correct memcg in a page
524 * @page: pointer to struct page recently allocated
525 * @memcg: the memcg structure we charged against
526 * @order: allocation order.
527 *
528 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
529 * failure of the allocation. if @page is NULL, this function will revert the
530 * charges. Otherwise, it will commit the memcg given by @memcg to the
531 * corresponding page_cgroup.
532 */
533static inline void
534memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
535{
536 if (memcg_kmem_enabled() && memcg)
537 __memcg_kmem_commit_charge(page, memcg, order);
538}
539
540/**
541 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
542 * @cachep: the original global kmem cache
543 * @gfp: allocation flags.
544 *
545 * This function assumes that the task allocating, which determines the memcg
546 * in the page allocator, belongs to the same cgroup throughout the whole
547 * process. Misacounting can happen if the task calls memcg_kmem_get_cache()
548 * while belonging to a cgroup, and later on changes. This is considered
549 * acceptable, and should only happen upon task migration.
550 *
551 * Before the cache is created by the memcg core, there is also a possible
552 * imbalance: the task belongs to a memcg, but the cache being allocated from
553 * is the global cache, since the child cache is not yet guaranteed to be
554 * ready. This case is also fine, since in this case the GFP_KMEMCG will not be
555 * passed and the page allocator will not attempt any cgroup accounting.
556 */
557static __always_inline struct kmem_cache *
558memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
559{
560 if (!memcg_kmem_enabled())
561 return cachep;
562 if (gfp & __GFP_NOFAIL)
563 return cachep;
564 if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
565 return cachep;
566 if (unlikely(fatal_signal_pending(current)))
567 return cachep;
568
569 return __memcg_kmem_get_cache(cachep, gfp);
570}
571#else
572#define for_each_memcg_cache_index(_idx) \
573 for (; NULL; )
574
575static inline bool memcg_kmem_enabled(void)
576{
577 return false;
578}
579
580static inline bool
581memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
582{
583 return true;
584}
585
586static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
587{
588}
589
590static inline void
591memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
592{
593}
594
595static inline int memcg_cache_id(struct mem_cgroup *memcg)
596{
597 return -1;
598}
599
600static inline int
601memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
602 struct kmem_cache *root_cache)
603{
604 return 0;
605}
606
607static inline void memcg_release_cache(struct kmem_cache *cachep)
608{
609}
610
611static inline void memcg_cache_list_add(struct mem_cgroup *memcg,
612 struct kmem_cache *s)
613{
614}
615
616static inline struct kmem_cache *
617memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
618{
619 return cachep;
620}
621
622static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
623{
624}
625#endif /* CONFIG_MEMCG_KMEM */
417#endif /* _LINUX_MEMCONTROL_H */ 626#endif /* _LINUX_MEMCONTROL_H */
418 627
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index d6a58065c09c..137b4198fc03 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -16,17 +16,15 @@
16/* Chosen so that structs with an unsigned long line up. */ 16/* Chosen so that structs with an unsigned long line up. */
17#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long)) 17#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
18 18
19#define ___module_cat(a,b) __mod_ ## a ## b
20#define __module_cat(a,b) ___module_cat(a,b)
21#ifdef MODULE 19#ifdef MODULE
22#define __MODULE_INFO(tag, name, info) \ 20#define __MODULE_INFO(tag, name, info) \
23static const char __module_cat(name,__LINE__)[] \ 21static const char __UNIQUE_ID(name)[] \
24 __used __attribute__((section(".modinfo"), unused, aligned(1))) \ 22 __used __attribute__((section(".modinfo"), unused, aligned(1))) \
25 = __stringify(tag) "=" info 23 = __stringify(tag) "=" info
26#else /* !MODULE */ 24#else /* !MODULE */
27/* This struct is here for syntactic coherency, it is not used */ 25/* This struct is here for syntactic coherency, it is not used */
28#define __MODULE_INFO(tag, name, info) \ 26#define __MODULE_INFO(tag, name, info) \
29 struct __module_cat(name,__LINE__) {} 27 struct __UNIQUE_ID(name) {}
30#endif 28#endif
31#define __MODULE_PARM_TYPE(name, _type) \ 29#define __MODULE_PARM_TYPE(name, _type) \
32 __MODULE_INFO(parmtype, name##type, #name ":" _type) 30 __MODULE_INFO(parmtype, name##type, #name ":" _type)
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index ed270bd2e4df..4eb0a50d0c55 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -23,6 +23,7 @@
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/kref.h> 24#include <linux/kref.h>
25#include <linux/sysfs.h> 25#include <linux/sysfs.h>
26#include <linux/workqueue.h>
26 27
27struct hd_geometry; 28struct hd_geometry;
28struct mtd_info; 29struct mtd_info;
@@ -43,7 +44,8 @@ struct mtd_blktrans_dev {
43 struct kref ref; 44 struct kref ref;
44 struct gendisk *disk; 45 struct gendisk *disk;
45 struct attribute_group *disk_attributes; 46 struct attribute_group *disk_attributes;
46 struct task_struct *thread; 47 struct workqueue_struct *wq;
48 struct work_struct work;
47 struct request_queue *rq; 49 struct request_queue *rq;
48 spinlock_t queue_lock; 50 spinlock_t queue_lock;
49 void *priv; 51 void *priv;
diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h
index 0f6fea73a1f6..407d1e556c39 100644
--- a/include/linux/mtd/doc2000.h
+++ b/include/linux/mtd/doc2000.h
@@ -92,12 +92,26 @@
92 * Others use readb/writeb 92 * Others use readb/writeb
93 */ 93 */
94#if defined(__arm__) 94#if defined(__arm__)
95#define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u32 *)(((unsigned long)adr)+((reg)<<2)))) 95static inline u8 ReadDOC_(u32 __iomem *addr, unsigned long reg)
96#define WriteDOC_(d, adr, reg) do{ *(volatile __u32 *)(((unsigned long)adr)+((reg)<<2)) = (__u32)d; wmb();} while(0) 96{
97 return __raw_readl(addr + reg);
98}
99static inline void WriteDOC_(u8 data, u32 __iomem *addr, unsigned long reg)
100{
101 __raw_writel(data, addr + reg);
102 wmb();
103}
97#define DOC_IOREMAP_LEN 0x8000 104#define DOC_IOREMAP_LEN 0x8000
98#elif defined(__ppc__) 105#elif defined(__ppc__)
99#define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u16 *)(((unsigned long)adr)+((reg)<<1)))) 106static inline u8 ReadDOC_(u16 __iomem *addr, unsigned long reg)
100#define WriteDOC_(d, adr, reg) do{ *(volatile __u16 *)(((unsigned long)adr)+((reg)<<1)) = (__u16)d; wmb();} while(0) 107{
108 return __raw_readw(addr + reg);
109}
110static inline void WriteDOC_(u8 data, u16 __iomem *addr, unsigned long reg)
111{
112 __raw_writew(data, addr + reg);
113 wmb();
114}
101#define DOC_IOREMAP_LEN 0x4000 115#define DOC_IOREMAP_LEN 0x4000
102#else 116#else
103#define ReadDOC_(adr, reg) readb((void __iomem *)(adr) + (reg)) 117#define ReadDOC_(adr, reg) readb((void __iomem *)(adr) + (reg))
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
index b20029221fb1..d6ed61ef451d 100644
--- a/include/linux/mtd/fsmc.h
+++ b/include/linux/mtd/fsmc.h
@@ -155,9 +155,6 @@ struct fsmc_nand_platform_data {
155 unsigned int width; 155 unsigned int width;
156 unsigned int bank; 156 unsigned int bank;
157 157
158 /* CLE, ALE offsets */
159 unsigned int cle_off;
160 unsigned int ale_off;
161 enum access_mode mode; 158 enum access_mode mode;
162 159
163 void (*select_bank)(uint32_t bank, uint32_t busw); 160 void (*select_bank)(uint32_t bank, uint32_t busw);
diff --git a/include/linux/mtd/gpmi-nand.h b/include/linux/mtd/gpmi-nand.h
deleted file mode 100644
index ed3c4e09f3d1..000000000000
--- a/include/linux/mtd/gpmi-nand.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#ifndef __MACH_MXS_GPMI_NAND_H__
20#define __MACH_MXS_GPMI_NAND_H__
21
22/* The size of the resources is fixed. */
23#define GPMI_NAND_RES_SIZE 6
24
25/* Resource names for the GPMI NAND driver. */
26#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
27#define GPMI_NAND_GPMI_INTERRUPT_RES_NAME "GPMI NAND GPMI Interrupt"
28#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
29#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
30#define GPMI_NAND_DMA_CHANNELS_RES_NAME "GPMI NAND DMA Channels"
31#define GPMI_NAND_DMA_INTERRUPT_RES_NAME "gpmi-dma"
32
33/**
34 * struct gpmi_nand_platform_data - GPMI NAND driver platform data.
35 *
36 * This structure communicates platform-specific information to the GPMI NAND
37 * driver that can't be expressed as resources.
38 *
39 * @platform_init: A pointer to a function the driver will call to
40 * initialize the platform (e.g., set up the pin mux).
41 * @min_prop_delay_in_ns: Minimum propagation delay of GPMI signals to and
42 * from the NAND Flash device, in nanoseconds.
43 * @max_prop_delay_in_ns: Maximum propagation delay of GPMI signals to and
44 * from the NAND Flash device, in nanoseconds.
45 * @max_chip_count: The maximum number of chips for which the driver
46 * should configure the hardware. This value most
47 * likely reflects the number of pins that are
48 * connected to a NAND Flash device. If this is
49 * greater than the SoC hardware can support, the
50 * driver will print a message and fail to initialize.
51 * @partitions: An optional pointer to an array of partition
52 * descriptions.
53 * @partition_count: The number of elements in the partitions array.
54 */
55struct gpmi_nand_platform_data {
56 /* SoC hardware information. */
57 int (*platform_init)(void);
58
59 /* NAND Flash information. */
60 unsigned int min_prop_delay_in_ns;
61 unsigned int max_prop_delay_in_ns;
62 unsigned int max_chip_count;
63
64 /* Medium information. */
65 struct mtd_partition *partitions;
66 unsigned partition_count;
67};
68#endif
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 3595a0236b0f..f6eb4332ac92 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -328,7 +328,7 @@ static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word
328 328
329static inline map_word map_word_load(struct map_info *map, const void *ptr) 329static inline map_word map_word_load(struct map_info *map, const void *ptr)
330{ 330{
331 map_word r; 331 map_word r = {{0} };
332 332
333 if (map_bankwidth_is_1(map)) 333 if (map_bankwidth_is_1(map))
334 r.x[0] = *(unsigned char *)ptr; 334 r.x[0] = *(unsigned char *)ptr;
@@ -391,7 +391,7 @@ static inline map_word map_word_ff(struct map_info *map)
391 391
392static inline map_word inline_map_read(struct map_info *map, unsigned long ofs) 392static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
393{ 393{
394 map_word r; 394 map_word uninitialized_var(r);
395 395
396 if (map_bankwidth_is_1(map)) 396 if (map_bankwidth_is_1(map))
397 r.x[0] = __raw_readb(map->virt + ofs); 397 r.x[0] = __raw_readb(map->virt + ofs);
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 81d61e704599..f9ac2897b86b 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -98,7 +98,7 @@ struct mtd_oob_ops {
98}; 98};
99 99
100#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32 100#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
101#define MTD_MAX_ECCPOS_ENTRIES_LARGE 448 101#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
102/* 102/*
103 * Internal ECC layout control structure. For historical reasons, there is a 103 * Internal ECC layout control structure. For historical reasons, there is a
104 * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained 104 * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 24e915957e4f..7ccb3c59ed60 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -219,6 +219,13 @@ typedef enum {
219#define NAND_OWN_BUFFERS 0x00020000 219#define NAND_OWN_BUFFERS 0x00020000
220/* Chip may not exist, so silence any errors in scan */ 220/* Chip may not exist, so silence any errors in scan */
221#define NAND_SCAN_SILENT_NODEV 0x00040000 221#define NAND_SCAN_SILENT_NODEV 0x00040000
222/*
223 * Autodetect nand buswidth with readid/onfi.
224 * This suppose the driver will configure the hardware in 8 bits mode
225 * when calling nand_scan_ident, and update its configuration
226 * before calling nand_scan_tail.
227 */
228#define NAND_BUSWIDTH_AUTO 0x00080000
222 229
223/* Options set by nand scan */ 230/* Options set by nand scan */
224/* Nand scan has allocated controller struct */ 231/* Nand scan has allocated controller struct */
@@ -471,8 +478,8 @@ struct nand_buffers {
471 * non 0 if ONFI supported. 478 * non 0 if ONFI supported.
472 * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is 479 * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is
473 * supported, 0 otherwise. 480 * supported, 0 otherwise.
474 * @onfi_set_features [REPLACEABLE] set the features for ONFI nand 481 * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
475 * @onfi_get_features [REPLACEABLE] get the features for ONFI nand 482 * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
476 * @ecclayout: [REPLACEABLE] the default ECC placement scheme 483 * @ecclayout: [REPLACEABLE] the default ECC placement scheme
477 * @bbt: [INTERN] bad block table pointer 484 * @bbt: [INTERN] bad block table pointer
478 * @bbt_td: [REPLACEABLE] bad block table descriptor for flash 485 * @bbt_td: [REPLACEABLE] bad block table descriptor for flash
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index 01e4b15b280e..1c28f8879b1c 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -20,6 +20,7 @@
20#ifndef __SH_FLCTL_H__ 20#ifndef __SH_FLCTL_H__
21#define __SH_FLCTL_H__ 21#define __SH_FLCTL_H__
22 22
23#include <linux/completion.h>
23#include <linux/mtd/mtd.h> 24#include <linux/mtd/mtd.h>
24#include <linux/mtd/nand.h> 25#include <linux/mtd/nand.h>
25#include <linux/mtd/partitions.h> 26#include <linux/mtd/partitions.h>
@@ -107,6 +108,7 @@
107#define ESTERINTE (0x1 << 24) /* ECC error interrupt enable */ 108#define ESTERINTE (0x1 << 24) /* ECC error interrupt enable */
108#define AC1CLR (0x1 << 19) /* ECC FIFO clear */ 109#define AC1CLR (0x1 << 19) /* ECC FIFO clear */
109#define AC0CLR (0x1 << 18) /* Data FIFO clear */ 110#define AC0CLR (0x1 << 18) /* Data FIFO clear */
111#define DREQ0EN (0x1 << 16) /* FLDTFIFODMA Request Enable */
110#define ECERB (0x1 << 9) /* ECC error */ 112#define ECERB (0x1 << 9) /* ECC error */
111#define STERB (0x1 << 8) /* Status error */ 113#define STERB (0x1 << 8) /* Status error */
112#define STERINTE (0x1 << 4) /* Status error enable */ 114#define STERINTE (0x1 << 4) /* Status error enable */
@@ -138,6 +140,8 @@ enum flctl_ecc_res_t {
138 FL_TIMEOUT 140 FL_TIMEOUT
139}; 141};
140 142
143struct dma_chan;
144
141struct sh_flctl { 145struct sh_flctl {
142 struct mtd_info mtd; 146 struct mtd_info mtd;
143 struct nand_chip chip; 147 struct nand_chip chip;
@@ -147,7 +151,7 @@ struct sh_flctl {
147 151
148 uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */ 152 uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */
149 int read_bytes; 153 int read_bytes;
150 int index; 154 unsigned int index;
151 int seqin_column; /* column in SEQIN cmd */ 155 int seqin_column; /* column in SEQIN cmd */
152 int seqin_page_addr; /* page_addr in SEQIN cmd */ 156 int seqin_page_addr; /* page_addr in SEQIN cmd */
153 uint32_t seqin_read_cmd; /* read cmd in SEQIN cmd */ 157 uint32_t seqin_read_cmd; /* read cmd in SEQIN cmd */
@@ -161,6 +165,11 @@ struct sh_flctl {
161 unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */ 165 unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */
162 unsigned holden:1; /* Hardware has FLHOLDCR and HOLDEN is set */ 166 unsigned holden:1; /* Hardware has FLHOLDCR and HOLDEN is set */
163 unsigned qos_request:1; /* QoS request to prevent deep power shutdown */ 167 unsigned qos_request:1; /* QoS request to prevent deep power shutdown */
168
169 /* DMA related objects */
170 struct dma_chan *chan_fifo0_rx;
171 struct dma_chan *chan_fifo0_tx;
172 struct completion dma_complete;
164}; 173};
165 174
166struct sh_flctl_platform_data { 175struct sh_flctl_platform_data {
@@ -170,6 +179,9 @@ struct sh_flctl_platform_data {
170 179
171 unsigned has_hwecc:1; 180 unsigned has_hwecc:1;
172 unsigned use_holden:1; 181 unsigned use_holden:1;
182
183 unsigned int slave_id_fifo0_tx;
184 unsigned int slave_id_fifo0_rx;
173}; 185};
174 186
175static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo) 187static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index b47d2040c9f2..3863a4dbdf18 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -100,6 +100,7 @@ extern int of_platform_populate(struct device_node *root,
100 100
101#if !defined(CONFIG_OF_ADDRESS) 101#if !defined(CONFIG_OF_ADDRESS)
102struct of_dev_auxdata; 102struct of_dev_auxdata;
103struct device;
103static inline int of_platform_populate(struct device_node *root, 104static inline int of_platform_populate(struct device_node *root,
104 const struct of_device_id *matches, 105 const struct of_device_id *matches,
105 const struct of_dev_auxdata *lookup, 106 const struct of_dev_auxdata *lookup,
diff --git a/include/linux/platform_data/i2c-cbus-gpio.h b/include/linux/platform_data/i2c-cbus-gpio.h
new file mode 100644
index 000000000000..6faa992a9502
--- /dev/null
+++ b/include/linux/platform_data/i2c-cbus-gpio.h
@@ -0,0 +1,27 @@
1/*
2 * i2c-cbus-gpio.h - CBUS I2C platform_data definition
3 *
4 * Copyright (C) 2004-2009 Nokia Corporation
5 *
6 * Written by Felipe Balbi and Aaro Koskinen.
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file "COPYING" in the main directory of this
10 * archive for more details.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#ifndef __INCLUDE_LINUX_I2C_CBUS_GPIO_H
19#define __INCLUDE_LINUX_I2C_CBUS_GPIO_H
20
21struct i2c_cbus_platform_data {
22 int dat_gpio;
23 int clk_gpio;
24 int sel_gpio;
25};
26
27#endif /* __INCLUDE_LINUX_I2C_CBUS_GPIO_H */
diff --git a/include/linux/platform_data/mtd-nomadik-nand.h b/include/linux/platform_data/mtd-nomadik-nand.h
deleted file mode 100644
index c3c8254c22a5..000000000000
--- a/include/linux/platform_data/mtd-nomadik-nand.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef __ASM_ARCH_NAND_H
2#define __ASM_ARCH_NAND_H
3
4struct nomadik_nand_platform_data {
5 struct mtd_partition *parts;
6 int nparts;
7 int options;
8 int (*init) (void);
9 int (*exit) (void);
10};
11
12#define NAND_IO_DATA 0x40000000
13#define NAND_IO_CMD 0x40800000
14#define NAND_IO_ADDR 0x41000000
15
16#endif /* __ASM_ARCH_NAND_H */
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 112b31436848..6d661f32e0e4 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -171,6 +171,9 @@ struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
171 unsigned int index, 171 unsigned int index,
172 const char *label); 172 const char *label);
173 173
174struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc,
175 const struct of_phandle_args *args);
176
174struct pwm_device *pwm_get(struct device *dev, const char *consumer); 177struct pwm_device *pwm_get(struct device *dev, const char *consumer);
175void pwm_put(struct pwm_device *pwm); 178void pwm_put(struct pwm_device *pwm);
176 179
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 6f54e40fa218..5ae8456d9670 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -125,14 +125,16 @@ int res_counter_charge_nofail(struct res_counter *counter,
125 * 125 *
126 * these calls check for usage underflow and show a warning on the console 126 * these calls check for usage underflow and show a warning on the console
127 * _locked call expects the counter->lock to be taken 127 * _locked call expects the counter->lock to be taken
128 *
129 * returns the total charges still present in @counter.
128 */ 130 */
129 131
130void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val); 132u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
131void res_counter_uncharge(struct res_counter *counter, unsigned long val); 133u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);
132 134
133void res_counter_uncharge_until(struct res_counter *counter, 135u64 res_counter_uncharge_until(struct res_counter *counter,
134 struct res_counter *top, 136 struct res_counter *top,
135 unsigned long val); 137 unsigned long val);
136/** 138/**
137 * res_counter_margin - calculate chargeable space of a counter 139 * res_counter_margin - calculate chargeable space of a counter
138 * @cnt: the counter 140 * @cnt: the counter
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9914c662ed7b..f712465b05c5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1597,6 +1597,7 @@ struct task_struct {
1597 unsigned long nr_pages; /* uncharged usage */ 1597 unsigned long nr_pages; /* uncharged usage */
1598 unsigned long memsw_nr_pages; /* uncharged mem+swap usage */ 1598 unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1599 } memcg_batch; 1599 } memcg_batch;
1600 unsigned int memcg_kmem_skip_account;
1600#endif 1601#endif
1601#ifdef CONFIG_HAVE_HW_BREAKPOINT 1602#ifdef CONFIG_HAVE_HW_BREAKPOINT
1602 atomic_t ptrace_bp_refcnt; 1603 atomic_t ptrace_bp_refcnt;
diff --git a/include/linux/security.h b/include/linux/security.h
index 05e88bdcf7d9..0f6afc657f77 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -694,6 +694,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
694 * userspace to load a kernel module with the given name. 694 * userspace to load a kernel module with the given name.
695 * @kmod_name name of the module requested by the kernel 695 * @kmod_name name of the module requested by the kernel
696 * Return 0 if successful. 696 * Return 0 if successful.
697 * @kernel_module_from_file:
698 * Load a kernel module from userspace.
699 * @file contains the file structure pointing to the file containing
700 * the kernel module to load. If the module is being loaded from a blob,
701 * this argument will be NULL.
702 * Return 0 if permission is granted.
697 * @task_fix_setuid: 703 * @task_fix_setuid:
698 * Update the module's state after setting one or more of the user 704 * Update the module's state after setting one or more of the user
699 * identity attributes of the current process. The @flags parameter 705 * identity attributes of the current process. The @flags parameter
@@ -1508,6 +1514,7 @@ struct security_operations {
1508 int (*kernel_act_as)(struct cred *new, u32 secid); 1514 int (*kernel_act_as)(struct cred *new, u32 secid);
1509 int (*kernel_create_files_as)(struct cred *new, struct inode *inode); 1515 int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
1510 int (*kernel_module_request)(char *kmod_name); 1516 int (*kernel_module_request)(char *kmod_name);
1517 int (*kernel_module_from_file)(struct file *file);
1511 int (*task_fix_setuid) (struct cred *new, const struct cred *old, 1518 int (*task_fix_setuid) (struct cred *new, const struct cred *old,
1512 int flags); 1519 int flags);
1513 int (*task_setpgid) (struct task_struct *p, pid_t pgid); 1520 int (*task_setpgid) (struct task_struct *p, pid_t pgid);
@@ -1765,6 +1772,7 @@ void security_transfer_creds(struct cred *new, const struct cred *old);
1765int security_kernel_act_as(struct cred *new, u32 secid); 1772int security_kernel_act_as(struct cred *new, u32 secid);
1766int security_kernel_create_files_as(struct cred *new, struct inode *inode); 1773int security_kernel_create_files_as(struct cred *new, struct inode *inode);
1767int security_kernel_module_request(char *kmod_name); 1774int security_kernel_module_request(char *kmod_name);
1775int security_kernel_module_from_file(struct file *file);
1768int security_task_fix_setuid(struct cred *new, const struct cred *old, 1776int security_task_fix_setuid(struct cred *new, const struct cred *old,
1769 int flags); 1777 int flags);
1770int security_task_setpgid(struct task_struct *p, pid_t pgid); 1778int security_task_setpgid(struct task_struct *p, pid_t pgid);
@@ -2278,6 +2286,11 @@ static inline int security_kernel_module_request(char *kmod_name)
2278 return 0; 2286 return 0;
2279} 2287}
2280 2288
2289static inline int security_kernel_module_from_file(struct file *file)
2290{
2291 return 0;
2292}
2293
2281static inline int security_task_fix_setuid(struct cred *new, 2294static inline int security_task_fix_setuid(struct cred *new,
2282 const struct cred *old, 2295 const struct cred *old,
2283 int flags) 2296 int flags)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 743a10415122..5d168d7e0a28 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -11,6 +11,8 @@
11 11
12#include <linux/gfp.h> 12#include <linux/gfp.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/workqueue.h>
15
14 16
15/* 17/*
16 * Flags to pass to kmem_cache_create(). 18 * Flags to pass to kmem_cache_create().
@@ -116,6 +118,7 @@ struct kmem_cache {
116}; 118};
117#endif 119#endif
118 120
121struct mem_cgroup;
119/* 122/*
120 * struct kmem_cache related prototypes 123 * struct kmem_cache related prototypes
121 */ 124 */
@@ -125,6 +128,9 @@ int slab_is_available(void);
125struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, 128struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
126 unsigned long, 129 unsigned long,
127 void (*)(void *)); 130 void (*)(void *));
131struct kmem_cache *
132kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
133 unsigned long, void (*)(void *), struct kmem_cache *);
128void kmem_cache_destroy(struct kmem_cache *); 134void kmem_cache_destroy(struct kmem_cache *);
129int kmem_cache_shrink(struct kmem_cache *); 135int kmem_cache_shrink(struct kmem_cache *);
130void kmem_cache_free(struct kmem_cache *, void *); 136void kmem_cache_free(struct kmem_cache *, void *);
@@ -175,6 +181,48 @@ void kmem_cache_free(struct kmem_cache *, void *);
175#ifndef ARCH_SLAB_MINALIGN 181#ifndef ARCH_SLAB_MINALIGN
176#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) 182#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
177#endif 183#endif
184/*
185 * This is the main placeholder for memcg-related information in kmem caches.
186 * struct kmem_cache will hold a pointer to it, so the memory cost while
187 * disabled is 1 pointer. The runtime cost while enabled, gets bigger than it
188 * would otherwise be if that would be bundled in kmem_cache: we'll need an
189 * extra pointer chase. But the trade off clearly lays in favor of not
190 * penalizing non-users.
191 *
192 * Both the root cache and the child caches will have it. For the root cache,
193 * this will hold a dynamically allocated array large enough to hold
194 * information about the currently limited memcgs in the system.
195 *
196 * Child caches will hold extra metadata needed for its operation. Fields are:
197 *
198 * @memcg: pointer to the memcg this cache belongs to
199 * @list: list_head for the list of all caches in this memcg
200 * @root_cache: pointer to the global, root cache, this cache was derived from
201 * @dead: set to true after the memcg dies; the cache may still be around.
202 * @nr_pages: number of pages that belongs to this cache.
203 * @destroy: worker to be called whenever we are ready, or believe we may be
204 * ready, to destroy this cache.
205 */
206struct memcg_cache_params {
207 bool is_root_cache;
208 union {
209 struct kmem_cache *memcg_caches[0];
210 struct {
211 struct mem_cgroup *memcg;
212 struct list_head list;
213 struct kmem_cache *root_cache;
214 bool dead;
215 atomic_t nr_pages;
216 struct work_struct destroy;
217 };
218 };
219};
220
221int memcg_update_all_caches(int num_memcgs);
222
223struct seq_file;
224int cache_show(struct kmem_cache *s, struct seq_file *m);
225void print_slabinfo_header(struct seq_file *m);
178 226
179/* 227/*
180 * Common kmalloc functions provided by all allocators 228 * Common kmalloc functions provided by all allocators
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 45c0356fdc8c..8bb6e0eaf3c6 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -81,6 +81,9 @@ struct kmem_cache {
81 */ 81 */
82 int obj_offset; 82 int obj_offset;
83#endif /* CONFIG_DEBUG_SLAB */ 83#endif /* CONFIG_DEBUG_SLAB */
84#ifdef CONFIG_MEMCG_KMEM
85 struct memcg_cache_params *memcg_params;
86#endif
84 87
85/* 6) per-cpu/per-node data, touched during every alloc/free */ 88/* 6) per-cpu/per-node data, touched during every alloc/free */
86 /* 89 /*
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index df448adb7283..9db4825cd393 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -101,6 +101,10 @@ struct kmem_cache {
101#ifdef CONFIG_SYSFS 101#ifdef CONFIG_SYSFS
102 struct kobject kobj; /* For sysfs */ 102 struct kobject kobj; /* For sysfs */
103#endif 103#endif
104#ifdef CONFIG_MEMCG_KMEM
105 struct memcg_cache_params *memcg_params;
106 int max_attr_size; /* for propagation, maximum size of a stored attr */
107#endif
104 108
105#ifdef CONFIG_NUMA 109#ifdef CONFIG_NUMA
106 /* 110 /*
@@ -222,7 +226,10 @@ void *__kmalloc(size_t size, gfp_t flags);
222static __always_inline void * 226static __always_inline void *
223kmalloc_order(size_t size, gfp_t flags, unsigned int order) 227kmalloc_order(size_t size, gfp_t flags, unsigned int order)
224{ 228{
225 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order); 229 void *ret;
230
231 flags |= (__GFP_COMP | __GFP_KMEMCG);
232 ret = (void *) __get_free_pages(flags, order);
226 kmemleak_alloc(ret, size, 1, flags); 233 kmemleak_alloc(ret, size, 1, flags);
227 return ret; 234 return ret;
228} 235}
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 36c3b07c5119..6caee34bf8a2 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -880,4 +880,5 @@ asmlinkage long sys_process_vm_writev(pid_t pid,
880 880
881asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type, 881asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type,
882 unsigned long idx1, unsigned long idx2); 882 unsigned long idx1, unsigned long idx2);
883asmlinkage long sys_finit_module(int fd, const char __user *uargs, int flags);
883#endif 884#endif
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index ccc1899bd62e..e7e04736802f 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -61,6 +61,8 @@ extern long do_no_restart_syscall(struct restart_block *parm);
61# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK) 61# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK)
62#endif 62#endif
63 63
64#define THREADINFO_GFP_ACCOUNTED (THREADINFO_GFP | __GFP_KMEMCG)
65
64/* 66/*
65 * flag set/clear/test wrappers 67 * flag set/clear/test wrappers
66 * - pass TIF_xxxx constants to these functions 68 * - pass TIF_xxxx constants to these functions
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 9bbeabf66c54..bd45eb7bedc8 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -69,6 +69,7 @@ struct usbnet {
69# define EVENT_DEV_ASLEEP 6 69# define EVENT_DEV_ASLEEP 6
70# define EVENT_DEV_OPEN 7 70# define EVENT_DEV_OPEN 7
71# define EVENT_DEVICE_REPORT_IDLE 8 71# define EVENT_DEVICE_REPORT_IDLE 8
72# define EVENT_NO_RUNTIME_PM 9
72}; 73};
73 74
74static inline struct usb_driver *driver_of(struct usb_interface *intf) 75static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -240,4 +241,6 @@ extern void usbnet_set_msglevel(struct net_device *, u32);
240extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); 241extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
241extern int usbnet_nway_reset(struct net_device *net); 242extern int usbnet_nway_reset(struct net_device *net);
242 243
244extern int usbnet_manage_power(struct usbnet *, int);
245
243#endif /* __LINUX_USB_USBNET_H */ 246#endif /* __LINUX_USB_USBNET_H */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index ba1d3615acbb..183292722f6e 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -318,6 +318,7 @@ extern void inet_csk_reqsk_queue_prune(struct sock *parent,
318 const unsigned long max_rto); 318 const unsigned long max_rto);
319 319
320extern void inet_csk_destroy_sock(struct sock *sk); 320extern void inet_csk_destroy_sock(struct sock *sk);
321extern void inet_csk_prepare_forced_close(struct sock *sk);
321 322
322/* 323/*
323 * LISTEN is a special case for poll.. 324 * LISTEN is a special case for poll..
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 7af1ea893038..23b3a7c58783 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -78,6 +78,13 @@ struct ra_msg {
78 __be32 retrans_timer; 78 __be32 retrans_timer;
79}; 79};
80 80
81struct rd_msg {
82 struct icmp6hdr icmph;
83 struct in6_addr target;
84 struct in6_addr dest;
85 __u8 opt[0];
86};
87
81struct nd_opt_hdr { 88struct nd_opt_hdr {
82 __u8 nd_opt_type; 89 __u8 nd_opt_type;
83 __u8 nd_opt_len; 90 __u8 nd_opt_len;
diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h
index d6fd8e5b14b7..1eddbf1557f2 100644
--- a/include/trace/events/gfpflags.h
+++ b/include/trace/events/gfpflags.h
@@ -34,6 +34,7 @@
34 {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ 34 {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \
35 {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ 35 {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \
36 {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ 36 {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
37 {(unsigned long)__GFP_KMEMCG, "GFP_KMEMCG"}, \
37 {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \ 38 {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \
38 {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \ 39 {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \
39 {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \ 40 {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 6e595ba545f4..2c531f478410 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -690,9 +690,11 @@ __SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
690 compat_sys_process_vm_writev) 690 compat_sys_process_vm_writev)
691#define __NR_kcmp 272 691#define __NR_kcmp 272
692__SYSCALL(__NR_kcmp, sys_kcmp) 692__SYSCALL(__NR_kcmp, sys_kcmp)
693#define __NR_finit_module 273
694__SYSCALL(__NR_finit_module, sys_finit_module)
693 695
694#undef __NR_syscalls 696#undef __NR_syscalls
695#define __NR_syscalls 273 697#define __NR_syscalls 274
696 698
697/* 699/*
698 * All syscalls below here should go away really, 700 * All syscalls below here should go away really,
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index afbb18a0227c..5db297514aec 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -163,6 +163,9 @@ struct br_port_msg {
163 163
164struct br_mdb_entry { 164struct br_mdb_entry {
165 __u32 ifindex; 165 __u32 ifindex;
166#define MDB_TEMPORARY 0
167#define MDB_PERMANENT 1
168 __u8 state;
166 struct { 169 struct {
167 union { 170 union {
168 __be32 ip4; 171 __be32 ip4;
diff --git a/include/uapi/linux/module.h b/include/uapi/linux/module.h
new file mode 100644
index 000000000000..38da4258b12f
--- /dev/null
+++ b/include/uapi/linux/module.h
@@ -0,0 +1,8 @@
1#ifndef _UAPI_LINUX_MODULE_H
2#define _UAPI_LINUX_MODULE_H
3
4/* Flags for sys_finit_module: */
5#define MODULE_INIT_IGNORE_MODVERSIONS 1
6#define MODULE_INIT_IGNORE_VERMAGIC 2
7
8#endif /* _UAPI_LINUX_MODULE_H */
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index e811474724c2..0e011eb91b5d 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -45,7 +45,9 @@
45 45
46static inline __attribute_const__ __u16 __fswab16(__u16 val) 46static inline __attribute_const__ __u16 __fswab16(__u16 val)
47{ 47{
48#ifdef __arch_swab16 48#ifdef __HAVE_BUILTIN_BSWAP16__
49 return __builtin_bswap16(val);
50#elif defined (__arch_swab16)
49 return __arch_swab16(val); 51 return __arch_swab16(val);
50#else 52#else
51 return ___constant_swab16(val); 53 return ___constant_swab16(val);
@@ -54,7 +56,9 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
54 56
55static inline __attribute_const__ __u32 __fswab32(__u32 val) 57static inline __attribute_const__ __u32 __fswab32(__u32 val)
56{ 58{
57#ifdef __arch_swab32 59#ifdef __HAVE_BUILTIN_BSWAP32__
60 return __builtin_bswap32(val);
61#elif defined(__arch_swab32)
58 return __arch_swab32(val); 62 return __arch_swab32(val);
59#else 63#else
60 return ___constant_swab32(val); 64 return ___constant_swab32(val);
@@ -63,7 +67,9 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
63 67
64static inline __attribute_const__ __u64 __fswab64(__u64 val) 68static inline __attribute_const__ __u64 __fswab64(__u64 val)
65{ 69{
66#ifdef __arch_swab64 70#ifdef __HAVE_BUILTIN_BSWAP64__
71 return __builtin_bswap64(val);
72#elif defined (__arch_swab64)
67 return __arch_swab64(val); 73 return __arch_swab64(val);
68#elif defined(__SWAB_64_THRU_32__) 74#elif defined(__SWAB_64_THRU_32__)
69 __u32 h = val >> 32; 75 __u32 h = val >> 32;
diff --git a/init/Kconfig b/init/Kconfig
index 675d8a2326cf..7d30240e5bfe 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -882,7 +882,7 @@ config MEMCG_SWAP_ENABLED
882config MEMCG_KMEM 882config MEMCG_KMEM
883 bool "Memory Resource Controller Kernel Memory accounting (EXPERIMENTAL)" 883 bool "Memory Resource Controller Kernel Memory accounting (EXPERIMENTAL)"
884 depends on MEMCG && EXPERIMENTAL 884 depends on MEMCG && EXPERIMENTAL
885 default n 885 depends on SLUB || SLAB
886 help 886 help
887 The Kernel Memory extension for Memory Resource Controller can limit 887 The Kernel Memory extension for Memory Resource Controller can limit
888 the amount of memory used by kernel objects in the system. Those are 888 the amount of memory used by kernel objects in the system. Those are
diff --git a/kernel/Makefile b/kernel/Makefile
index ac0d533eb7de..6c072b6da239 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -54,7 +54,7 @@ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
54obj-$(CONFIG_PROVE_LOCKING) += spinlock.o 54obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
55obj-$(CONFIG_UID16) += uid16.o 55obj-$(CONFIG_UID16) += uid16.o
56obj-$(CONFIG_MODULES) += module.o 56obj-$(CONFIG_MODULES) += module.o
57obj-$(CONFIG_MODULE_SIG) += module_signing.o modsign_pubkey.o 57obj-$(CONFIG_MODULE_SIG) += module_signing.o modsign_pubkey.o modsign_certificate.o
58obj-$(CONFIG_KALLSYMS) += kallsyms.o 58obj-$(CONFIG_KALLSYMS) += kallsyms.o
59obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o 59obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
60obj-$(CONFIG_KEXEC) += kexec.o 60obj-$(CONFIG_KEXEC) += kexec.o
@@ -137,10 +137,14 @@ ifeq ($(CONFIG_MODULE_SIG),y)
137# 137#
138# Pull the signing certificate and any extra certificates into the kernel 138# Pull the signing certificate and any extra certificates into the kernel
139# 139#
140
141quiet_cmd_touch = TOUCH $@
142 cmd_touch = touch $@
143
140extra_certificates: 144extra_certificates:
141 touch $@ 145 $(call cmd,touch)
142 146
143kernel/modsign_pubkey.o: signing_key.x509 extra_certificates 147kernel/modsign_certificate.o: signing_key.x509 extra_certificates
144 148
145############################################################################### 149###############################################################################
146# 150#
diff --git a/kernel/fork.c b/kernel/fork.c
index c36c4e301efe..85f6d536608d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -146,7 +146,7 @@ void __weak arch_release_thread_info(struct thread_info *ti)
146static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, 146static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
147 int node) 147 int node)
148{ 148{
149 struct page *page = alloc_pages_node(node, THREADINFO_GFP, 149 struct page *page = alloc_pages_node(node, THREADINFO_GFP_ACCOUNTED,
150 THREAD_SIZE_ORDER); 150 THREAD_SIZE_ORDER);
151 151
152 return page ? page_address(page) : NULL; 152 return page ? page_address(page) : NULL;
@@ -154,7 +154,7 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
154 154
155static inline void free_thread_info(struct thread_info *ti) 155static inline void free_thread_info(struct thread_info *ti)
156{ 156{
157 free_pages((unsigned long)ti, THREAD_SIZE_ORDER); 157 free_memcg_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
158} 158}
159# else 159# else
160static struct kmem_cache *thread_info_cache; 160static struct kmem_cache *thread_info_cache;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 35c70c9e24d8..e49a288fa479 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -818,7 +818,7 @@ static void irq_thread_dtor(struct callback_head *unused)
818 action = kthread_data(tsk); 818 action = kthread_data(tsk);
819 819
820 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", 820 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
821 tsk->comm ? tsk->comm : "", tsk->pid, action->irq); 821 tsk->comm, tsk->pid, action->irq);
822 822
823 823
824 desc = irq_to_desc(action->irq); 824 desc = irq_to_desc(action->irq);
diff --git a/kernel/modsign_certificate.S b/kernel/modsign_certificate.S
new file mode 100644
index 000000000000..246b4c6e6135
--- /dev/null
+++ b/kernel/modsign_certificate.S
@@ -0,0 +1,19 @@
1/* SYMBOL_PREFIX defined on commandline from CONFIG_SYMBOL_PREFIX */
2#ifndef SYMBOL_PREFIX
3#define ASM_SYMBOL(sym) sym
4#else
5#define PASTE2(x,y) x##y
6#define PASTE(x,y) PASTE2(x,y)
7#define ASM_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)
8#endif
9
10#define GLOBAL(name) \
11 .globl ASM_SYMBOL(name); \
12 ASM_SYMBOL(name):
13
14 .section ".init.data","aw"
15
16GLOBAL(modsign_certificate_list)
17 .incbin "signing_key.x509"
18 .incbin "extra_certificates"
19GLOBAL(modsign_certificate_list_end)
diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c
index 767e559dfb10..045504fffbb2 100644
--- a/kernel/modsign_pubkey.c
+++ b/kernel/modsign_pubkey.c
@@ -20,12 +20,6 @@ struct key *modsign_keyring;
20 20
21extern __initdata const u8 modsign_certificate_list[]; 21extern __initdata const u8 modsign_certificate_list[];
22extern __initdata const u8 modsign_certificate_list_end[]; 22extern __initdata const u8 modsign_certificate_list_end[];
23asm(".section .init.data,\"aw\"\n"
24 SYMBOL_PREFIX "modsign_certificate_list:\n"
25 ".incbin \"signing_key.x509\"\n"
26 ".incbin \"extra_certificates\"\n"
27 SYMBOL_PREFIX "modsign_certificate_list_end:"
28 );
29 23
30/* 24/*
31 * We need to make sure ccache doesn't cache the .o file as it doesn't notice 25 * We need to make sure ccache doesn't cache the .o file as it doesn't notice
diff --git a/kernel/module.c b/kernel/module.c
index 808bd62e1723..250092c1d57d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -21,6 +21,7 @@
21#include <linux/ftrace_event.h> 21#include <linux/ftrace_event.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/kallsyms.h> 23#include <linux/kallsyms.h>
24#include <linux/file.h>
24#include <linux/fs.h> 25#include <linux/fs.h>
25#include <linux/sysfs.h> 26#include <linux/sysfs.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
@@ -28,6 +29,7 @@
28#include <linux/vmalloc.h> 29#include <linux/vmalloc.h>
29#include <linux/elf.h> 30#include <linux/elf.h>
30#include <linux/proc_fs.h> 31#include <linux/proc_fs.h>
32#include <linux/security.h>
31#include <linux/seq_file.h> 33#include <linux/seq_file.h>
32#include <linux/syscalls.h> 34#include <linux/syscalls.h>
33#include <linux/fcntl.h> 35#include <linux/fcntl.h>
@@ -59,6 +61,7 @@
59#include <linux/pfn.h> 61#include <linux/pfn.h>
60#include <linux/bsearch.h> 62#include <linux/bsearch.h>
61#include <linux/fips.h> 63#include <linux/fips.h>
64#include <uapi/linux/module.h>
62#include "module-internal.h" 65#include "module-internal.h"
63 66
64#define CREATE_TRACE_POINTS 67#define CREATE_TRACE_POINTS
@@ -2279,7 +2282,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
2279 Elf_Shdr *symsect = info->sechdrs + info->index.sym; 2282 Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2280 Elf_Shdr *strsect = info->sechdrs + info->index.str; 2283 Elf_Shdr *strsect = info->sechdrs + info->index.str;
2281 const Elf_Sym *src; 2284 const Elf_Sym *src;
2282 unsigned int i, nsrc, ndst, strtab_size; 2285 unsigned int i, nsrc, ndst, strtab_size = 0;
2283 2286
2284 /* Put symbol section at end of init part of module. */ 2287 /* Put symbol section at end of init part of module. */
2285 symsect->sh_flags |= SHF_ALLOC; 2288 symsect->sh_flags |= SHF_ALLOC;
@@ -2290,9 +2293,6 @@ static void layout_symtab(struct module *mod, struct load_info *info)
2290 src = (void *)info->hdr + symsect->sh_offset; 2293 src = (void *)info->hdr + symsect->sh_offset;
2291 nsrc = symsect->sh_size / sizeof(*src); 2294 nsrc = symsect->sh_size / sizeof(*src);
2292 2295
2293 /* strtab always starts with a nul, so offset 0 is the empty string. */
2294 strtab_size = 1;
2295
2296 /* Compute total space required for the core symbols' strtab. */ 2296 /* Compute total space required for the core symbols' strtab. */
2297 for (ndst = i = 0; i < nsrc; i++) { 2297 for (ndst = i = 0; i < nsrc; i++) {
2298 if (i == 0 || 2298 if (i == 0 ||
@@ -2334,7 +2334,6 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
2334 mod->core_symtab = dst = mod->module_core + info->symoffs; 2334 mod->core_symtab = dst = mod->module_core + info->symoffs;
2335 mod->core_strtab = s = mod->module_core + info->stroffs; 2335 mod->core_strtab = s = mod->module_core + info->stroffs;
2336 src = mod->symtab; 2336 src = mod->symtab;
2337 *s++ = 0;
2338 for (ndst = i = 0; i < mod->num_symtab; i++) { 2337 for (ndst = i = 0; i < mod->num_symtab; i++) {
2339 if (i == 0 || 2338 if (i == 0 ||
2340 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { 2339 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
@@ -2375,7 +2374,7 @@ static void dynamic_debug_remove(struct _ddebug *debug)
2375 2374
2376void * __weak module_alloc(unsigned long size) 2375void * __weak module_alloc(unsigned long size)
2377{ 2376{
2378 return size == 0 ? NULL : vmalloc_exec(size); 2377 return vmalloc_exec(size);
2379} 2378}
2380 2379
2381static void *module_alloc_update_bounds(unsigned long size) 2380static void *module_alloc_update_bounds(unsigned long size)
@@ -2422,18 +2421,17 @@ static inline void kmemleak_load_module(const struct module *mod,
2422#endif 2421#endif
2423 2422
2424#ifdef CONFIG_MODULE_SIG 2423#ifdef CONFIG_MODULE_SIG
2425static int module_sig_check(struct load_info *info, 2424static int module_sig_check(struct load_info *info)
2426 const void *mod, unsigned long *_len)
2427{ 2425{
2428 int err = -ENOKEY; 2426 int err = -ENOKEY;
2429 unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; 2427 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2430 unsigned long len = *_len; 2428 const void *mod = info->hdr;
2431 2429
2432 if (len > markerlen && 2430 if (info->len > markerlen &&
2433 memcmp(mod + len - markerlen, MODULE_SIG_STRING, markerlen) == 0) { 2431 memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2434 /* We truncate the module to discard the signature */ 2432 /* We truncate the module to discard the signature */
2435 *_len -= markerlen; 2433 info->len -= markerlen;
2436 err = mod_verify_sig(mod, _len); 2434 err = mod_verify_sig(mod, &info->len);
2437 } 2435 }
2438 2436
2439 if (!err) { 2437 if (!err) {
@@ -2451,59 +2449,107 @@ static int module_sig_check(struct load_info *info,
2451 return err; 2449 return err;
2452} 2450}
2453#else /* !CONFIG_MODULE_SIG */ 2451#else /* !CONFIG_MODULE_SIG */
2454static int module_sig_check(struct load_info *info, 2452static int module_sig_check(struct load_info *info)
2455 void *mod, unsigned long *len)
2456{ 2453{
2457 return 0; 2454 return 0;
2458} 2455}
2459#endif /* !CONFIG_MODULE_SIG */ 2456#endif /* !CONFIG_MODULE_SIG */
2460 2457
2461/* Sets info->hdr, info->len and info->sig_ok. */ 2458/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2462static int copy_and_check(struct load_info *info, 2459static int elf_header_check(struct load_info *info)
2463 const void __user *umod, unsigned long len, 2460{
2464 const char __user *uargs) 2461 if (info->len < sizeof(*(info->hdr)))
2462 return -ENOEXEC;
2463
2464 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2465 || info->hdr->e_type != ET_REL
2466 || !elf_check_arch(info->hdr)
2467 || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2468 return -ENOEXEC;
2469
2470 if (info->hdr->e_shoff >= info->len
2471 || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2472 info->len - info->hdr->e_shoff))
2473 return -ENOEXEC;
2474
2475 return 0;
2476}
2477
2478/* Sets info->hdr and info->len. */
2479static int copy_module_from_user(const void __user *umod, unsigned long len,
2480 struct load_info *info)
2465{ 2481{
2466 int err; 2482 int err;
2467 Elf_Ehdr *hdr;
2468 2483
2469 if (len < sizeof(*hdr)) 2484 info->len = len;
2485 if (info->len < sizeof(*(info->hdr)))
2470 return -ENOEXEC; 2486 return -ENOEXEC;
2471 2487
2488 err = security_kernel_module_from_file(NULL);
2489 if (err)
2490 return err;
2491
2472 /* Suck in entire file: we'll want most of it. */ 2492 /* Suck in entire file: we'll want most of it. */
2473 if ((hdr = vmalloc(len)) == NULL) 2493 info->hdr = vmalloc(info->len);
2494 if (!info->hdr)
2474 return -ENOMEM; 2495 return -ENOMEM;
2475 2496
2476 if (copy_from_user(hdr, umod, len) != 0) { 2497 if (copy_from_user(info->hdr, umod, info->len) != 0) {
2477 err = -EFAULT; 2498 vfree(info->hdr);
2478 goto free_hdr; 2499 return -EFAULT;
2479 } 2500 }
2480 2501
2481 err = module_sig_check(info, hdr, &len); 2502 return 0;
2503}
2504
2505/* Sets info->hdr and info->len. */
2506static int copy_module_from_fd(int fd, struct load_info *info)
2507{
2508 struct file *file;
2509 int err;
2510 struct kstat stat;
2511 loff_t pos;
2512 ssize_t bytes = 0;
2513
2514 file = fget(fd);
2515 if (!file)
2516 return -ENOEXEC;
2517
2518 err = security_kernel_module_from_file(file);
2482 if (err) 2519 if (err)
2483 goto free_hdr; 2520 goto out;
2484 2521
2485 /* Sanity checks against insmoding binaries or wrong arch, 2522 err = vfs_getattr(file->f_vfsmnt, file->f_dentry, &stat);
2486 weird elf version */ 2523 if (err)
2487 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0 2524 goto out;
2488 || hdr->e_type != ET_REL
2489 || !elf_check_arch(hdr)
2490 || hdr->e_shentsize != sizeof(Elf_Shdr)) {
2491 err = -ENOEXEC;
2492 goto free_hdr;
2493 }
2494 2525
2495 if (hdr->e_shoff >= len || 2526 if (stat.size > INT_MAX) {
2496 hdr->e_shnum * sizeof(Elf_Shdr) > len - hdr->e_shoff) { 2527 err = -EFBIG;
2497 err = -ENOEXEC; 2528 goto out;
2498 goto free_hdr; 2529 }
2530 info->hdr = vmalloc(stat.size);
2531 if (!info->hdr) {
2532 err = -ENOMEM;
2533 goto out;
2499 } 2534 }
2500 2535
2501 info->hdr = hdr; 2536 pos = 0;
2502 info->len = len; 2537 while (pos < stat.size) {
2503 return 0; 2538 bytes = kernel_read(file, pos, (char *)(info->hdr) + pos,
2539 stat.size - pos);
2540 if (bytes < 0) {
2541 vfree(info->hdr);
2542 err = bytes;
2543 goto out;
2544 }
2545 if (bytes == 0)
2546 break;
2547 pos += bytes;
2548 }
2549 info->len = pos;
2504 2550
2505free_hdr: 2551out:
2506 vfree(hdr); 2552 fput(file);
2507 return err; 2553 return err;
2508} 2554}
2509 2555
@@ -2512,7 +2558,7 @@ static void free_copy(struct load_info *info)
2512 vfree(info->hdr); 2558 vfree(info->hdr);
2513} 2559}
2514 2560
2515static int rewrite_section_headers(struct load_info *info) 2561static int rewrite_section_headers(struct load_info *info, int flags)
2516{ 2562{
2517 unsigned int i; 2563 unsigned int i;
2518 2564
@@ -2540,7 +2586,10 @@ static int rewrite_section_headers(struct load_info *info)
2540 } 2586 }
2541 2587
2542 /* Track but don't keep modinfo and version sections. */ 2588 /* Track but don't keep modinfo and version sections. */
2543 info->index.vers = find_sec(info, "__versions"); 2589 if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
2590 info->index.vers = 0; /* Pretend no __versions section! */
2591 else
2592 info->index.vers = find_sec(info, "__versions");
2544 info->index.info = find_sec(info, ".modinfo"); 2593 info->index.info = find_sec(info, ".modinfo");
2545 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; 2594 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2546 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; 2595 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
@@ -2555,7 +2604,7 @@ static int rewrite_section_headers(struct load_info *info)
2555 * Return the temporary module pointer (we'll replace it with the final 2604 * Return the temporary module pointer (we'll replace it with the final
2556 * one when we move the module sections around). 2605 * one when we move the module sections around).
2557 */ 2606 */
2558static struct module *setup_load_info(struct load_info *info) 2607static struct module *setup_load_info(struct load_info *info, int flags)
2559{ 2608{
2560 unsigned int i; 2609 unsigned int i;
2561 int err; 2610 int err;
@@ -2566,7 +2615,7 @@ static struct module *setup_load_info(struct load_info *info)
2566 info->secstrings = (void *)info->hdr 2615 info->secstrings = (void *)info->hdr
2567 + info->sechdrs[info->hdr->e_shstrndx].sh_offset; 2616 + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2568 2617
2569 err = rewrite_section_headers(info); 2618 err = rewrite_section_headers(info, flags);
2570 if (err) 2619 if (err)
2571 return ERR_PTR(err); 2620 return ERR_PTR(err);
2572 2621
@@ -2604,11 +2653,14 @@ static struct module *setup_load_info(struct load_info *info)
2604 return mod; 2653 return mod;
2605} 2654}
2606 2655
2607static int check_modinfo(struct module *mod, struct load_info *info) 2656static int check_modinfo(struct module *mod, struct load_info *info, int flags)
2608{ 2657{
2609 const char *modmagic = get_modinfo(info, "vermagic"); 2658 const char *modmagic = get_modinfo(info, "vermagic");
2610 int err; 2659 int err;
2611 2660
2661 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
2662 modmagic = NULL;
2663
2612 /* This is allowed: modprobe --force will invalidate it. */ 2664 /* This is allowed: modprobe --force will invalidate it. */
2613 if (!modmagic) { 2665 if (!modmagic) {
2614 err = try_to_force_load(mod, "bad vermagic"); 2666 err = try_to_force_load(mod, "bad vermagic");
@@ -2738,20 +2790,23 @@ static int move_module(struct module *mod, struct load_info *info)
2738 memset(ptr, 0, mod->core_size); 2790 memset(ptr, 0, mod->core_size);
2739 mod->module_core = ptr; 2791 mod->module_core = ptr;
2740 2792
2741 ptr = module_alloc_update_bounds(mod->init_size); 2793 if (mod->init_size) {
2742 /* 2794 ptr = module_alloc_update_bounds(mod->init_size);
2743 * The pointer to this block is stored in the module structure 2795 /*
2744 * which is inside the block. This block doesn't need to be 2796 * The pointer to this block is stored in the module structure
2745 * scanned as it contains data and code that will be freed 2797 * which is inside the block. This block doesn't need to be
2746 * after the module is initialized. 2798 * scanned as it contains data and code that will be freed
2747 */ 2799 * after the module is initialized.
2748 kmemleak_ignore(ptr); 2800 */
2749 if (!ptr && mod->init_size) { 2801 kmemleak_ignore(ptr);
2750 module_free(mod, mod->module_core); 2802 if (!ptr) {
2751 return -ENOMEM; 2803 module_free(mod, mod->module_core);
2752 } 2804 return -ENOMEM;
2753 memset(ptr, 0, mod->init_size); 2805 }
2754 mod->module_init = ptr; 2806 memset(ptr, 0, mod->init_size);
2807 mod->module_init = ptr;
2808 } else
2809 mod->module_init = NULL;
2755 2810
2756 /* Transfer each section which specifies SHF_ALLOC */ 2811 /* Transfer each section which specifies SHF_ALLOC */
2757 pr_debug("final section addresses:\n"); 2812 pr_debug("final section addresses:\n");
@@ -2844,18 +2899,18 @@ int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
2844 return 0; 2899 return 0;
2845} 2900}
2846 2901
2847static struct module *layout_and_allocate(struct load_info *info) 2902static struct module *layout_and_allocate(struct load_info *info, int flags)
2848{ 2903{
2849 /* Module within temporary copy. */ 2904 /* Module within temporary copy. */
2850 struct module *mod; 2905 struct module *mod;
2851 Elf_Shdr *pcpusec; 2906 Elf_Shdr *pcpusec;
2852 int err; 2907 int err;
2853 2908
2854 mod = setup_load_info(info); 2909 mod = setup_load_info(info, flags);
2855 if (IS_ERR(mod)) 2910 if (IS_ERR(mod))
2856 return mod; 2911 return mod;
2857 2912
2858 err = check_modinfo(mod, info); 2913 err = check_modinfo(mod, info, flags);
2859 if (err) 2914 if (err)
2860 return ERR_PTR(err); 2915 return ERR_PTR(err);
2861 2916
@@ -2942,33 +2997,124 @@ static bool finished_loading(const char *name)
2942 return ret; 2997 return ret;
2943} 2998}
2944 2999
3000/* Call module constructors. */
3001static void do_mod_ctors(struct module *mod)
3002{
3003#ifdef CONFIG_CONSTRUCTORS
3004 unsigned long i;
3005
3006 for (i = 0; i < mod->num_ctors; i++)
3007 mod->ctors[i]();
3008#endif
3009}
3010
3011/* This is where the real work happens */
3012static int do_init_module(struct module *mod)
3013{
3014 int ret = 0;
3015
3016 blocking_notifier_call_chain(&module_notify_list,
3017 MODULE_STATE_COMING, mod);
3018
3019 /* Set RO and NX regions for core */
3020 set_section_ro_nx(mod->module_core,
3021 mod->core_text_size,
3022 mod->core_ro_size,
3023 mod->core_size);
3024
3025 /* Set RO and NX regions for init */
3026 set_section_ro_nx(mod->module_init,
3027 mod->init_text_size,
3028 mod->init_ro_size,
3029 mod->init_size);
3030
3031 do_mod_ctors(mod);
3032 /* Start the module */
3033 if (mod->init != NULL)
3034 ret = do_one_initcall(mod->init);
3035 if (ret < 0) {
3036 /* Init routine failed: abort. Try to protect us from
3037 buggy refcounters. */
3038 mod->state = MODULE_STATE_GOING;
3039 synchronize_sched();
3040 module_put(mod);
3041 blocking_notifier_call_chain(&module_notify_list,
3042 MODULE_STATE_GOING, mod);
3043 free_module(mod);
3044 wake_up_all(&module_wq);
3045 return ret;
3046 }
3047 if (ret > 0) {
3048 printk(KERN_WARNING
3049"%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n"
3050"%s: loading module anyway...\n",
3051 __func__, mod->name, ret,
3052 __func__);
3053 dump_stack();
3054 }
3055
3056 /* Now it's a first class citizen! */
3057 mod->state = MODULE_STATE_LIVE;
3058 blocking_notifier_call_chain(&module_notify_list,
3059 MODULE_STATE_LIVE, mod);
3060
3061 /* We need to finish all async code before the module init sequence is done */
3062 async_synchronize_full();
3063
3064 mutex_lock(&module_mutex);
3065 /* Drop initial reference. */
3066 module_put(mod);
3067 trim_init_extable(mod);
3068#ifdef CONFIG_KALLSYMS
3069 mod->num_symtab = mod->core_num_syms;
3070 mod->symtab = mod->core_symtab;
3071 mod->strtab = mod->core_strtab;
3072#endif
3073 unset_module_init_ro_nx(mod);
3074 module_free(mod, mod->module_init);
3075 mod->module_init = NULL;
3076 mod->init_size = 0;
3077 mod->init_ro_size = 0;
3078 mod->init_text_size = 0;
3079 mutex_unlock(&module_mutex);
3080 wake_up_all(&module_wq);
3081
3082 return 0;
3083}
3084
3085static int may_init_module(void)
3086{
3087 if (!capable(CAP_SYS_MODULE) || modules_disabled)
3088 return -EPERM;
3089
3090 return 0;
3091}
3092
2945/* Allocate and load the module: note that size of section 0 is always 3093/* Allocate and load the module: note that size of section 0 is always
2946 zero, and we rely on this for optional sections. */ 3094 zero, and we rely on this for optional sections. */
2947static struct module *load_module(void __user *umod, 3095static int load_module(struct load_info *info, const char __user *uargs,
2948 unsigned long len, 3096 int flags)
2949 const char __user *uargs)
2950{ 3097{
2951 struct load_info info = { NULL, };
2952 struct module *mod, *old; 3098 struct module *mod, *old;
2953 long err; 3099 long err;
2954 3100
2955 pr_debug("load_module: umod=%p, len=%lu, uargs=%p\n", 3101 err = module_sig_check(info);
2956 umod, len, uargs); 3102 if (err)
3103 goto free_copy;
2957 3104
2958 /* Copy in the blobs from userspace, check they are vaguely sane. */ 3105 err = elf_header_check(info);
2959 err = copy_and_check(&info, umod, len, uargs);
2960 if (err) 3106 if (err)
2961 return ERR_PTR(err); 3107 goto free_copy;
2962 3108
2963 /* Figure out module layout, and allocate all the memory. */ 3109 /* Figure out module layout, and allocate all the memory. */
2964 mod = layout_and_allocate(&info); 3110 mod = layout_and_allocate(info, flags);
2965 if (IS_ERR(mod)) { 3111 if (IS_ERR(mod)) {
2966 err = PTR_ERR(mod); 3112 err = PTR_ERR(mod);
2967 goto free_copy; 3113 goto free_copy;
2968 } 3114 }
2969 3115
2970#ifdef CONFIG_MODULE_SIG 3116#ifdef CONFIG_MODULE_SIG
2971 mod->sig_ok = info.sig_ok; 3117 mod->sig_ok = info->sig_ok;
2972 if (!mod->sig_ok) 3118 if (!mod->sig_ok)
2973 add_taint_module(mod, TAINT_FORCED_MODULE); 3119 add_taint_module(mod, TAINT_FORCED_MODULE);
2974#endif 3120#endif
@@ -2980,25 +3126,25 @@ static struct module *load_module(void __user *umod,
2980 3126
2981 /* Now we've got everything in the final locations, we can 3127 /* Now we've got everything in the final locations, we can
2982 * find optional sections. */ 3128 * find optional sections. */
2983 find_module_sections(mod, &info); 3129 find_module_sections(mod, info);
2984 3130
2985 err = check_module_license_and_versions(mod); 3131 err = check_module_license_and_versions(mod);
2986 if (err) 3132 if (err)
2987 goto free_unload; 3133 goto free_unload;
2988 3134
2989 /* Set up MODINFO_ATTR fields */ 3135 /* Set up MODINFO_ATTR fields */
2990 setup_modinfo(mod, &info); 3136 setup_modinfo(mod, info);
2991 3137
2992 /* Fix up syms, so that st_value is a pointer to location. */ 3138 /* Fix up syms, so that st_value is a pointer to location. */
2993 err = simplify_symbols(mod, &info); 3139 err = simplify_symbols(mod, info);
2994 if (err < 0) 3140 if (err < 0)
2995 goto free_modinfo; 3141 goto free_modinfo;
2996 3142
2997 err = apply_relocations(mod, &info); 3143 err = apply_relocations(mod, info);
2998 if (err < 0) 3144 if (err < 0)
2999 goto free_modinfo; 3145 goto free_modinfo;
3000 3146
3001 err = post_relocation(mod, &info); 3147 err = post_relocation(mod, info);
3002 if (err < 0) 3148 if (err < 0)
3003 goto free_modinfo; 3149 goto free_modinfo;
3004 3150
@@ -3038,14 +3184,14 @@ again:
3038 } 3184 }
3039 3185
3040 /* This has to be done once we're sure module name is unique. */ 3186 /* This has to be done once we're sure module name is unique. */
3041 dynamic_debug_setup(info.debug, info.num_debug); 3187 dynamic_debug_setup(info->debug, info->num_debug);
3042 3188
3043 /* Find duplicate symbols */ 3189 /* Find duplicate symbols */
3044 err = verify_export_symbols(mod); 3190 err = verify_export_symbols(mod);
3045 if (err < 0) 3191 if (err < 0)
3046 goto ddebug; 3192 goto ddebug;
3047 3193
3048 module_bug_finalize(info.hdr, info.sechdrs, mod); 3194 module_bug_finalize(info->hdr, info->sechdrs, mod);
3049 list_add_rcu(&mod->list, &modules); 3195 list_add_rcu(&mod->list, &modules);
3050 mutex_unlock(&module_mutex); 3196 mutex_unlock(&module_mutex);
3051 3197
@@ -3056,16 +3202,17 @@ again:
3056 goto unlink; 3202 goto unlink;
3057 3203
3058 /* Link in to syfs. */ 3204 /* Link in to syfs. */
3059 err = mod_sysfs_setup(mod, &info, mod->kp, mod->num_kp); 3205 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3060 if (err < 0) 3206 if (err < 0)
3061 goto unlink; 3207 goto unlink;
3062 3208
3063 /* Get rid of temporary copy. */ 3209 /* Get rid of temporary copy. */
3064 free_copy(&info); 3210 free_copy(info);
3065 3211
3066 /* Done! */ 3212 /* Done! */
3067 trace_module_load(mod); 3213 trace_module_load(mod);
3068 return mod; 3214
3215 return do_init_module(mod);
3069 3216
3070 unlink: 3217 unlink:
3071 mutex_lock(&module_mutex); 3218 mutex_lock(&module_mutex);
@@ -3074,7 +3221,7 @@ again:
3074 module_bug_cleanup(mod); 3221 module_bug_cleanup(mod);
3075 wake_up_all(&module_wq); 3222 wake_up_all(&module_wq);
3076 ddebug: 3223 ddebug:
3077 dynamic_debug_remove(info.debug); 3224 dynamic_debug_remove(info->debug);
3078 unlock: 3225 unlock:
3079 mutex_unlock(&module_mutex); 3226 mutex_unlock(&module_mutex);
3080 synchronize_sched(); 3227 synchronize_sched();
@@ -3086,106 +3233,52 @@ again:
3086 free_unload: 3233 free_unload:
3087 module_unload_free(mod); 3234 module_unload_free(mod);
3088 free_module: 3235 free_module:
3089 module_deallocate(mod, &info); 3236 module_deallocate(mod, info);
3090 free_copy: 3237 free_copy:
3091 free_copy(&info); 3238 free_copy(info);
3092 return ERR_PTR(err); 3239 return err;
3093}
3094
3095/* Call module constructors. */
3096static void do_mod_ctors(struct module *mod)
3097{
3098#ifdef CONFIG_CONSTRUCTORS
3099 unsigned long i;
3100
3101 for (i = 0; i < mod->num_ctors; i++)
3102 mod->ctors[i]();
3103#endif
3104} 3240}
3105 3241
3106/* This is where the real work happens */
3107SYSCALL_DEFINE3(init_module, void __user *, umod, 3242SYSCALL_DEFINE3(init_module, void __user *, umod,
3108 unsigned long, len, const char __user *, uargs) 3243 unsigned long, len, const char __user *, uargs)
3109{ 3244{
3110 struct module *mod; 3245 int err;
3111 int ret = 0; 3246 struct load_info info = { };
3112 3247
3113 /* Must have permission */ 3248 err = may_init_module();
3114 if (!capable(CAP_SYS_MODULE) || modules_disabled) 3249 if (err)
3115 return -EPERM; 3250 return err;
3116 3251
3117 /* Do all the hard work */ 3252 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3118 mod = load_module(umod, len, uargs); 3253 umod, len, uargs);
3119 if (IS_ERR(mod))
3120 return PTR_ERR(mod);
3121 3254
3122 blocking_notifier_call_chain(&module_notify_list, 3255 err = copy_module_from_user(umod, len, &info);
3123 MODULE_STATE_COMING, mod); 3256 if (err)
3257 return err;
3124 3258
3125 /* Set RO and NX regions for core */ 3259 return load_module(&info, uargs, 0);
3126 set_section_ro_nx(mod->module_core, 3260}
3127 mod->core_text_size,
3128 mod->core_ro_size,
3129 mod->core_size);
3130 3261
3131 /* Set RO and NX regions for init */ 3262SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
3132 set_section_ro_nx(mod->module_init, 3263{
3133 mod->init_text_size, 3264 int err;
3134 mod->init_ro_size, 3265 struct load_info info = { };
3135 mod->init_size);
3136 3266
3137 do_mod_ctors(mod); 3267 err = may_init_module();
3138 /* Start the module */ 3268 if (err)
3139 if (mod->init != NULL) 3269 return err;
3140 ret = do_one_initcall(mod->init);
3141 if (ret < 0) {
3142 /* Init routine failed: abort. Try to protect us from
3143 buggy refcounters. */
3144 mod->state = MODULE_STATE_GOING;
3145 synchronize_sched();
3146 module_put(mod);
3147 blocking_notifier_call_chain(&module_notify_list,
3148 MODULE_STATE_GOING, mod);
3149 free_module(mod);
3150 wake_up_all(&module_wq);
3151 return ret;
3152 }
3153 if (ret > 0) {
3154 printk(KERN_WARNING
3155"%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n"
3156"%s: loading module anyway...\n",
3157 __func__, mod->name, ret,
3158 __func__);
3159 dump_stack();
3160 }
3161 3270
3162 /* Now it's a first class citizen! */ 3271 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
3163 mod->state = MODULE_STATE_LIVE;
3164 blocking_notifier_call_chain(&module_notify_list,
3165 MODULE_STATE_LIVE, mod);
3166 3272
3167 /* We need to finish all async code before the module init sequence is done */ 3273 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
3168 async_synchronize_full(); 3274 |MODULE_INIT_IGNORE_VERMAGIC))
3275 return -EINVAL;
3169 3276
3170 mutex_lock(&module_mutex); 3277 err = copy_module_from_fd(fd, &info);
3171 /* Drop initial reference. */ 3278 if (err)
3172 module_put(mod); 3279 return err;
3173 trim_init_extable(mod);
3174#ifdef CONFIG_KALLSYMS
3175 mod->num_symtab = mod->core_num_syms;
3176 mod->symtab = mod->core_symtab;
3177 mod->strtab = mod->core_strtab;
3178#endif
3179 unset_module_init_ro_nx(mod);
3180 module_free(mod, mod->module_init);
3181 mod->module_init = NULL;
3182 mod->init_size = 0;
3183 mod->init_ro_size = 0;
3184 mod->init_text_size = 0;
3185 mutex_unlock(&module_mutex);
3186 wake_up_all(&module_wq);
3187 3280
3188 return 0; 3281 return load_module(&info, uargs, flags);
3189} 3282}
3190 3283
3191static inline int within(unsigned long addr, void *start, unsigned long size) 3284static inline int within(unsigned long addr, void *start, unsigned long size)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index d73840271dce..a278cad1d5d6 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -9,6 +9,7 @@
9#include <asm/uaccess.h> 9#include <asm/uaccess.h>
10#include <linux/kernel_stat.h> 10#include <linux/kernel_stat.h>
11#include <trace/events/timer.h> 11#include <trace/events/timer.h>
12#include <linux/random.h>
12 13
13/* 14/*
14 * Called after updating RLIMIT_CPU to run cpu timer and update 15 * Called after updating RLIMIT_CPU to run cpu timer and update
@@ -470,6 +471,8 @@ static void cleanup_timers(struct list_head *head,
470 */ 471 */
471void posix_cpu_timers_exit(struct task_struct *tsk) 472void posix_cpu_timers_exit(struct task_struct *tsk)
472{ 473{
474 add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
475 sizeof(unsigned long long));
473 cleanup_timers(tsk->cpu_timers, 476 cleanup_timers(tsk->cpu_timers,
474 tsk->utime, tsk->stime, tsk->se.sum_exec_runtime); 477 tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
475 478
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index 3920d593e63c..ff55247e7049 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -86,33 +86,39 @@ int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
86 return __res_counter_charge(counter, val, limit_fail_at, true); 86 return __res_counter_charge(counter, val, limit_fail_at, true);
87} 87}
88 88
89void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val) 89u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
90{ 90{
91 if (WARN_ON(counter->usage < val)) 91 if (WARN_ON(counter->usage < val))
92 val = counter->usage; 92 val = counter->usage;
93 93
94 counter->usage -= val; 94 counter->usage -= val;
95 return counter->usage;
95} 96}
96 97
97void res_counter_uncharge_until(struct res_counter *counter, 98u64 res_counter_uncharge_until(struct res_counter *counter,
98 struct res_counter *top, 99 struct res_counter *top,
99 unsigned long val) 100 unsigned long val)
100{ 101{
101 unsigned long flags; 102 unsigned long flags;
102 struct res_counter *c; 103 struct res_counter *c;
104 u64 ret = 0;
103 105
104 local_irq_save(flags); 106 local_irq_save(flags);
105 for (c = counter; c != top; c = c->parent) { 107 for (c = counter; c != top; c = c->parent) {
108 u64 r;
106 spin_lock(&c->lock); 109 spin_lock(&c->lock);
107 res_counter_uncharge_locked(c, val); 110 r = res_counter_uncharge_locked(c, val);
111 if (c == counter)
112 ret = r;
108 spin_unlock(&c->lock); 113 spin_unlock(&c->lock);
109 } 114 }
110 local_irq_restore(flags); 115 local_irq_restore(flags);
116 return ret;
111} 117}
112 118
113void res_counter_uncharge(struct res_counter *counter, unsigned long val) 119u64 res_counter_uncharge(struct res_counter *counter, unsigned long val)
114{ 120{
115 res_counter_uncharge_until(counter, NULL, val); 121 return res_counter_uncharge_until(counter, NULL, val);
116} 122}
117 123
118static inline unsigned long long * 124static inline unsigned long long *
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index dbff751e4086..395084d4ce16 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -25,6 +25,7 @@ cond_syscall(sys_swapoff);
25cond_syscall(sys_kexec_load); 25cond_syscall(sys_kexec_load);
26cond_syscall(compat_sys_kexec_load); 26cond_syscall(compat_sys_kexec_load);
27cond_syscall(sys_init_module); 27cond_syscall(sys_init_module);
28cond_syscall(sys_finit_module);
28cond_syscall(sys_delete_module); 29cond_syscall(sys_delete_module);
29cond_syscall(sys_socketpair); 30cond_syscall(sys_socketpair);
30cond_syscall(sys_bind); 31cond_syscall(sys_bind);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 997c6a16ec22..75a2ab3d0b02 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -344,6 +344,10 @@ static void watchdog_enable(unsigned int cpu)
344{ 344{
345 struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); 345 struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
346 346
347 /* kick off the timer for the hardlockup detector */
348 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
349 hrtimer->function = watchdog_timer_fn;
350
347 if (!watchdog_enabled) { 351 if (!watchdog_enabled) {
348 kthread_park(current); 352 kthread_park(current);
349 return; 353 return;
@@ -352,10 +356,6 @@ static void watchdog_enable(unsigned int cpu)
352 /* Enable the perf event */ 356 /* Enable the perf event */
353 watchdog_nmi_enable(cpu); 357 watchdog_nmi_enable(cpu);
354 358
355 /* kick off the timer for the hardlockup detector */
356 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
357 hrtimer->function = watchdog_timer_fn;
358
359 /* done here because hrtimer_start can only pin to smp_processor_id() */ 359 /* done here because hrtimer_start can only pin to smp_processor_id() */
360 hrtimer_start(hrtimer, ns_to_ktime(sample_period), 360 hrtimer_start(hrtimer, ns_to_ktime(sample_period),
361 HRTIMER_MODE_REL_PINNED); 361 HRTIMER_MODE_REL_PINNED);
@@ -369,9 +369,6 @@ static void watchdog_disable(unsigned int cpu)
369{ 369{
370 struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); 370 struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
371 371
372 if (!watchdog_enabled)
373 return;
374
375 watchdog_set_prio(SCHED_NORMAL, 0); 372 watchdog_set_prio(SCHED_NORMAL, 0);
376 hrtimer_cancel(hrtimer); 373 hrtimer_cancel(hrtimer);
377 /* disable the perf event */ 374 /* disable the perf event */
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 5293d2433029..11b9b01fda6b 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -81,7 +81,7 @@ next_tag:
81 goto next_tag; 81 goto next_tag;
82 } 82 }
83 83
84 if (unlikely((tag & 0x1f) == 0x1f)) { 84 if (unlikely((tag & 0x1f) == ASN1_LONG_TAG)) {
85 do { 85 do {
86 if (unlikely(datalen - dp < 2)) 86 if (unlikely(datalen - dp < 2))
87 goto data_overrun_error; 87 goto data_overrun_error;
@@ -96,7 +96,7 @@ next_tag:
96 goto next_tag; 96 goto next_tag;
97 } 97 }
98 98
99 if (unlikely(len == 0x80)) { 99 if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
100 /* Indefinite length */ 100 /* Indefinite length */
101 if (unlikely((tag & ASN1_CONS_BIT) == ASN1_PRIM << 5)) 101 if (unlikely((tag & ASN1_CONS_BIT) == ASN1_PRIM << 5))
102 goto indefinite_len_primitive; 102 goto indefinite_len_primitive;
@@ -222,7 +222,7 @@ next_op:
222 if (unlikely(dp >= datalen - 1)) 222 if (unlikely(dp >= datalen - 1))
223 goto data_overrun_error; 223 goto data_overrun_error;
224 tag = data[dp++]; 224 tag = data[dp++];
225 if (unlikely((tag & 0x1f) == 0x1f)) 225 if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
226 goto long_tag_not_supported; 226 goto long_tag_not_supported;
227 227
228 if (op & ASN1_OP_MATCH__ANY) { 228 if (op & ASN1_OP_MATCH__ANY) {
@@ -254,7 +254,7 @@ next_op:
254 254
255 len = data[dp++]; 255 len = data[dp++];
256 if (len > 0x7f) { 256 if (len > 0x7f) {
257 if (unlikely(len == 0x80)) { 257 if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
258 /* Indefinite length */ 258 /* Indefinite length */
259 if (unlikely(!(tag & ASN1_CONS_BIT))) 259 if (unlikely(!(tag & ASN1_CONS_BIT)))
260 goto indefinite_len_primitive; 260 goto indefinite_len_primitive;
diff --git a/mm/Kconfig b/mm/Kconfig
index 71259e052ce8..278e3ab1f169 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -149,7 +149,18 @@ config MOVABLE_NODE
149 depends on NO_BOOTMEM 149 depends on NO_BOOTMEM
150 depends on X86_64 150 depends on X86_64
151 depends on NUMA 151 depends on NUMA
152 depends on BROKEN 152 default n
153 help
154 Allow a node to have only movable memory. Pages used by the kernel,
155 such as direct mapping pages cannot be migrated. So the corresponding
156 memory device cannot be hotplugged. This option allows users to
157 online all the memory of a node as movable memory so that the whole
158 node can be hotplugged. Users who don't use the memory hotplug
159 feature are fine with this option on since they don't online memory
160 as movable.
161
162 Say Y here if you want to hotplug a whole node.
163 Say N here if you want kernel to use memory on all nodes evenly.
153 164
154# eventually, we can have this option just 'select SPARSEMEM' 165# eventually, we can have this option just 'select SPARSEMEM'
155config MEMORY_HOTPLUG 166config MEMORY_HOTPLUG
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e5318c7793ae..4f3ea0b1e57c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1906,14 +1906,12 @@ static int __init hugetlb_init(void)
1906 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 1906 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1907 1907
1908 hugetlb_init_hstates(); 1908 hugetlb_init_hstates();
1909
1910 gather_bootmem_prealloc(); 1909 gather_bootmem_prealloc();
1911
1912 report_hugepages(); 1910 report_hugepages();
1913 1911
1914 hugetlb_sysfs_init(); 1912 hugetlb_sysfs_init();
1915
1916 hugetlb_register_all_nodes(); 1913 hugetlb_register_all_nodes();
1914 hugetlb_cgroup_file_init();
1917 1915
1918 return 0; 1916 return 0;
1919} 1917}
@@ -1943,13 +1941,6 @@ void __init hugetlb_add_hstate(unsigned order)
1943 h->next_nid_to_free = first_node(node_states[N_MEMORY]); 1941 h->next_nid_to_free = first_node(node_states[N_MEMORY]);
1944 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 1942 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1945 huge_page_size(h)/1024); 1943 huge_page_size(h)/1024);
1946 /*
1947 * Add cgroup control files only if the huge page consists
1948 * of more than two normal pages. This is because we use
1949 * page[2].lru.next for storing cgoup details.
1950 */
1951 if (order >= HUGETLB_CGROUP_MIN_ORDER)
1952 hugetlb_cgroup_file_init(hugetlb_max_hstate - 1);
1953 1944
1954 parsed_hstate = h; 1945 parsed_hstate = h;
1955} 1946}
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index b5bde7a5c017..9cea7de22ffb 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -333,7 +333,7 @@ static char *mem_fmt(char *buf, int size, unsigned long hsize)
333 return buf; 333 return buf;
334} 334}
335 335
336int __init hugetlb_cgroup_file_init(int idx) 336static void __init __hugetlb_cgroup_file_init(int idx)
337{ 337{
338 char buf[32]; 338 char buf[32];
339 struct cftype *cft; 339 struct cftype *cft;
@@ -375,7 +375,22 @@ int __init hugetlb_cgroup_file_init(int idx)
375 375
376 WARN_ON(cgroup_add_cftypes(&hugetlb_subsys, h->cgroup_files)); 376 WARN_ON(cgroup_add_cftypes(&hugetlb_subsys, h->cgroup_files));
377 377
378 return 0; 378 return;
379}
380
381void __init hugetlb_cgroup_file_init(void)
382{
383 struct hstate *h;
384
385 for_each_hstate(h) {
386 /*
387 * Add cgroup control files only if the huge page consists
388 * of more than two normal pages. This is because we use
389 * page[2].lru.next for storing cgroup details.
390 */
391 if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
392 __hugetlb_cgroup_file_init(hstate_index(h));
393 }
379} 394}
380 395
381/* 396/*
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index a217cc544060..752a705c77c2 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1556,7 +1556,8 @@ static int dump_str_object_info(const char *str)
1556 struct kmemleak_object *object; 1556 struct kmemleak_object *object;
1557 unsigned long addr; 1557 unsigned long addr;
1558 1558
1559 addr= simple_strtoul(str, NULL, 0); 1559 if (kstrtoul(str, 0, &addr))
1560 return -EINVAL;
1560 object = find_and_get_object(addr, 0); 1561 object = find_and_get_object(addr, 0);
1561 if (!object) { 1562 if (!object) {
1562 pr_info("Unknown object at 0x%08lx\n", addr); 1563 pr_info("Unknown object at 0x%08lx\n", addr);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bbfac5063ca8..f3009b4bae51 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -10,6 +10,10 @@
10 * Copyright (C) 2009 Nokia Corporation 10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov 11 * Author: Kirill A. Shutemov
12 * 12 *
13 * Kernel Memory Controller
14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
15 * Authors: Glauber Costa and Suleiman Souhlal
16 *
13 * This program is free software; you can redistribute it and/or modify 17 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by 18 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or 19 * the Free Software Foundation; either version 2 of the License, or
@@ -268,6 +272,10 @@ struct mem_cgroup {
268 }; 272 };
269 273
270 /* 274 /*
275 * the counter to account for kernel memory usage.
276 */
277 struct res_counter kmem;
278 /*
271 * Per cgroup active and inactive list, similar to the 279 * Per cgroup active and inactive list, similar to the
272 * per zone LRU lists. 280 * per zone LRU lists.
273 */ 281 */
@@ -282,6 +290,7 @@ struct mem_cgroup {
282 * Should the accounting and control be hierarchical, per subtree? 290 * Should the accounting and control be hierarchical, per subtree?
283 */ 291 */
284 bool use_hierarchy; 292 bool use_hierarchy;
293 unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */
285 294
286 bool oom_lock; 295 bool oom_lock;
287 atomic_t under_oom; 296 atomic_t under_oom;
@@ -332,8 +341,61 @@ struct mem_cgroup {
332#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) 341#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
333 struct tcp_memcontrol tcp_mem; 342 struct tcp_memcontrol tcp_mem;
334#endif 343#endif
344#if defined(CONFIG_MEMCG_KMEM)
345 /* analogous to slab_common's slab_caches list. per-memcg */
346 struct list_head memcg_slab_caches;
347 /* Not a spinlock, we can take a lot of time walking the list */
348 struct mutex slab_caches_mutex;
349 /* Index in the kmem_cache->memcg_params->memcg_caches array */
350 int kmemcg_id;
351#endif
335}; 352};
336 353
354/* internal only representation about the status of kmem accounting. */
355enum {
356 KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */
357 KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. */
358 KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
359};
360
361/* We account when limit is on, but only after call sites are patched */
362#define KMEM_ACCOUNTED_MASK \
363 ((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED))
364
365#ifdef CONFIG_MEMCG_KMEM
366static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
367{
368 set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
369}
370
371static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
372{
373 return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
374}
375
376static void memcg_kmem_set_activated(struct mem_cgroup *memcg)
377{
378 set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
379}
380
381static void memcg_kmem_clear_activated(struct mem_cgroup *memcg)
382{
383 clear_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
384}
385
386static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
387{
388 if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
389 set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
390}
391
392static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
393{
394 return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
395 &memcg->kmem_account_flags);
396}
397#endif
398
337/* Stuffs for move charges at task migration. */ 399/* Stuffs for move charges at task migration. */
338/* 400/*
339 * Types of charges to be moved. "move_charge_at_immitgrate" is treated as a 401 * Types of charges to be moved. "move_charge_at_immitgrate" is treated as a
@@ -388,9 +450,13 @@ enum charge_type {
388}; 450};
389 451
390/* for encoding cft->private value on file */ 452/* for encoding cft->private value on file */
391#define _MEM (0) 453enum res_type {
392#define _MEMSWAP (1) 454 _MEM,
393#define _OOM_TYPE (2) 455 _MEMSWAP,
456 _OOM_TYPE,
457 _KMEM,
458};
459
394#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) 460#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
395#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff) 461#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
396#define MEMFILE_ATTR(val) ((val) & 0xffff) 462#define MEMFILE_ATTR(val) ((val) & 0xffff)
@@ -487,6 +553,75 @@ static void disarm_sock_keys(struct mem_cgroup *memcg)
487} 553}
488#endif 554#endif
489 555
556#ifdef CONFIG_MEMCG_KMEM
557/*
558 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
559 * There are two main reasons for not using the css_id for this:
560 * 1) this works better in sparse environments, where we have a lot of memcgs,
561 * but only a few kmem-limited. Or also, if we have, for instance, 200
562 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
563 * 200 entry array for that.
564 *
565 * 2) In order not to violate the cgroup API, we would like to do all memory
566 * allocation in ->create(). At that point, we haven't yet allocated the
567 * css_id. Having a separate index prevents us from messing with the cgroup
568 * core for this
569 *
570 * The current size of the caches array is stored in
571 * memcg_limited_groups_array_size. It will double each time we have to
572 * increase it.
573 */
574static DEFINE_IDA(kmem_limited_groups);
575int memcg_limited_groups_array_size;
576
577/*
578 * MIN_SIZE is different than 1, because we would like to avoid going through
579 * the alloc/free process all the time. In a small machine, 4 kmem-limited
580 * cgroups is a reasonable guess. In the future, it could be a parameter or
581 * tunable, but that is strictly not necessary.
582 *
583 * MAX_SIZE should be as large as the number of css_ids. Ideally, we could get
584 * this constant directly from cgroup, but it is understandable that this is
585 * better kept as an internal representation in cgroup.c. In any case, the
586 * css_id space is not getting any smaller, and we don't have to necessarily
587 * increase ours as well if it increases.
588 */
589#define MEMCG_CACHES_MIN_SIZE 4
590#define MEMCG_CACHES_MAX_SIZE 65535
591
592/*
593 * A lot of the calls to the cache allocation functions are expected to be
594 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
595 * conditional to this static branch, we'll have to allow modules that does
596 * kmem_cache_alloc and the such to see this symbol as well
597 */
598struct static_key memcg_kmem_enabled_key;
599EXPORT_SYMBOL(memcg_kmem_enabled_key);
600
601static void disarm_kmem_keys(struct mem_cgroup *memcg)
602{
603 if (memcg_kmem_is_active(memcg)) {
604 static_key_slow_dec(&memcg_kmem_enabled_key);
605 ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
606 }
607 /*
608 * This check can't live in kmem destruction function,
609 * since the charges will outlive the cgroup
610 */
611 WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
612}
613#else
614static void disarm_kmem_keys(struct mem_cgroup *memcg)
615{
616}
617#endif /* CONFIG_MEMCG_KMEM */
618
619static void disarm_static_keys(struct mem_cgroup *memcg)
620{
621 disarm_sock_keys(memcg);
622 disarm_kmem_keys(memcg);
623}
624
490static void drain_all_stock_async(struct mem_cgroup *memcg); 625static void drain_all_stock_async(struct mem_cgroup *memcg);
491 626
492static struct mem_cgroup_per_zone * 627static struct mem_cgroup_per_zone *
@@ -1453,6 +1588,10 @@ done:
1453 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10, 1588 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1454 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10, 1589 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1455 res_counter_read_u64(&memcg->memsw, RES_FAILCNT)); 1590 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1591 printk(KERN_INFO "kmem: usage %llukB, limit %llukB, failcnt %llu\n",
1592 res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
1593 res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
1594 res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
1456} 1595}
1457 1596
1458/* 1597/*
@@ -2060,20 +2199,28 @@ struct memcg_stock_pcp {
2060static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2199static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2061static DEFINE_MUTEX(percpu_charge_mutex); 2200static DEFINE_MUTEX(percpu_charge_mutex);
2062 2201
2063/* 2202/**
2064 * Try to consume stocked charge on this cpu. If success, one page is consumed 2203 * consume_stock: Try to consume stocked charge on this cpu.
2065 * from local stock and true is returned. If the stock is 0 or charges from a 2204 * @memcg: memcg to consume from.
2066 * cgroup which is not current target, returns false. This stock will be 2205 * @nr_pages: how many pages to charge.
2067 * refilled. 2206 *
2207 * The charges will only happen if @memcg matches the current cpu's memcg
2208 * stock, and at least @nr_pages are available in that stock. Failure to
2209 * service an allocation will refill the stock.
2210 *
2211 * returns true if successful, false otherwise.
2068 */ 2212 */
2069static bool consume_stock(struct mem_cgroup *memcg) 2213static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2070{ 2214{
2071 struct memcg_stock_pcp *stock; 2215 struct memcg_stock_pcp *stock;
2072 bool ret = true; 2216 bool ret = true;
2073 2217
2218 if (nr_pages > CHARGE_BATCH)
2219 return false;
2220
2074 stock = &get_cpu_var(memcg_stock); 2221 stock = &get_cpu_var(memcg_stock);
2075 if (memcg == stock->cached && stock->nr_pages) 2222 if (memcg == stock->cached && stock->nr_pages >= nr_pages)
2076 stock->nr_pages--; 2223 stock->nr_pages -= nr_pages;
2077 else /* need to call res_counter_charge */ 2224 else /* need to call res_counter_charge */
2078 ret = false; 2225 ret = false;
2079 put_cpu_var(memcg_stock); 2226 put_cpu_var(memcg_stock);
@@ -2250,7 +2397,8 @@ enum {
2250}; 2397};
2251 2398
2252static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2399static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2253 unsigned int nr_pages, bool oom_check) 2400 unsigned int nr_pages, unsigned int min_pages,
2401 bool oom_check)
2254{ 2402{
2255 unsigned long csize = nr_pages * PAGE_SIZE; 2403 unsigned long csize = nr_pages * PAGE_SIZE;
2256 struct mem_cgroup *mem_over_limit; 2404 struct mem_cgroup *mem_over_limit;
@@ -2273,18 +2421,18 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2273 } else 2421 } else
2274 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); 2422 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2275 /* 2423 /*
2276 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
2277 * of regular pages (CHARGE_BATCH), or a single regular page (1).
2278 *
2279 * Never reclaim on behalf of optional batching, retry with a 2424 * Never reclaim on behalf of optional batching, retry with a
2280 * single page instead. 2425 * single page instead.
2281 */ 2426 */
2282 if (nr_pages == CHARGE_BATCH) 2427 if (nr_pages > min_pages)
2283 return CHARGE_RETRY; 2428 return CHARGE_RETRY;
2284 2429
2285 if (!(gfp_mask & __GFP_WAIT)) 2430 if (!(gfp_mask & __GFP_WAIT))
2286 return CHARGE_WOULDBLOCK; 2431 return CHARGE_WOULDBLOCK;
2287 2432
2433 if (gfp_mask & __GFP_NORETRY)
2434 return CHARGE_NOMEM;
2435
2288 ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags); 2436 ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
2289 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2437 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2290 return CHARGE_RETRY; 2438 return CHARGE_RETRY;
@@ -2297,7 +2445,7 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2297 * unlikely to succeed so close to the limit, and we fall back 2445 * unlikely to succeed so close to the limit, and we fall back
2298 * to regular pages anyway in case of failure. 2446 * to regular pages anyway in case of failure.
2299 */ 2447 */
2300 if (nr_pages == 1 && ret) 2448 if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
2301 return CHARGE_RETRY; 2449 return CHARGE_RETRY;
2302 2450
2303 /* 2451 /*
@@ -2371,7 +2519,7 @@ again:
2371 memcg = *ptr; 2519 memcg = *ptr;
2372 if (mem_cgroup_is_root(memcg)) 2520 if (mem_cgroup_is_root(memcg))
2373 goto done; 2521 goto done;
2374 if (nr_pages == 1 && consume_stock(memcg)) 2522 if (consume_stock(memcg, nr_pages))
2375 goto done; 2523 goto done;
2376 css_get(&memcg->css); 2524 css_get(&memcg->css);
2377 } else { 2525 } else {
@@ -2396,7 +2544,7 @@ again:
2396 rcu_read_unlock(); 2544 rcu_read_unlock();
2397 goto done; 2545 goto done;
2398 } 2546 }
2399 if (nr_pages == 1 && consume_stock(memcg)) { 2547 if (consume_stock(memcg, nr_pages)) {
2400 /* 2548 /*
2401 * It seems dagerous to access memcg without css_get(). 2549 * It seems dagerous to access memcg without css_get().
2402 * But considering how consume_stok works, it's not 2550 * But considering how consume_stok works, it's not
@@ -2431,7 +2579,8 @@ again:
2431 nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; 2579 nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2432 } 2580 }
2433 2581
2434 ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check); 2582 ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, nr_pages,
2583 oom_check);
2435 switch (ret) { 2584 switch (ret) {
2436 case CHARGE_OK: 2585 case CHARGE_OK:
2437 break; 2586 break;
@@ -2624,6 +2773,766 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2624 memcg_check_events(memcg, page); 2773 memcg_check_events(memcg, page);
2625} 2774}
2626 2775
2776static DEFINE_MUTEX(set_limit_mutex);
2777
2778#ifdef CONFIG_MEMCG_KMEM
2779static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
2780{
2781 return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
2782 (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK);
2783}
2784
2785/*
2786 * This is a bit cumbersome, but it is rarely used and avoids a backpointer
2787 * in the memcg_cache_params struct.
2788 */
2789static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
2790{
2791 struct kmem_cache *cachep;
2792
2793 VM_BUG_ON(p->is_root_cache);
2794 cachep = p->root_cache;
2795 return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
2796}
2797
2798#ifdef CONFIG_SLABINFO
2799static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft,
2800 struct seq_file *m)
2801{
2802 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
2803 struct memcg_cache_params *params;
2804
2805 if (!memcg_can_account_kmem(memcg))
2806 return -EIO;
2807
2808 print_slabinfo_header(m);
2809
2810 mutex_lock(&memcg->slab_caches_mutex);
2811 list_for_each_entry(params, &memcg->memcg_slab_caches, list)
2812 cache_show(memcg_params_to_cache(params), m);
2813 mutex_unlock(&memcg->slab_caches_mutex);
2814
2815 return 0;
2816}
2817#endif
2818
2819static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
2820{
2821 struct res_counter *fail_res;
2822 struct mem_cgroup *_memcg;
2823 int ret = 0;
2824 bool may_oom;
2825
2826 ret = res_counter_charge(&memcg->kmem, size, &fail_res);
2827 if (ret)
2828 return ret;
2829
2830 /*
2831 * Conditions under which we can wait for the oom_killer. Those are
2832 * the same conditions tested by the core page allocator
2833 */
2834 may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
2835
2836 _memcg = memcg;
2837 ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
2838 &_memcg, may_oom);
2839
2840 if (ret == -EINTR) {
2841 /*
2842 * __mem_cgroup_try_charge() chosed to bypass to root due to
2843 * OOM kill or fatal signal. Since our only options are to
2844 * either fail the allocation or charge it to this cgroup, do
2845 * it as a temporary condition. But we can't fail. From a
2846 * kmem/slab perspective, the cache has already been selected,
2847 * by mem_cgroup_kmem_get_cache(), so it is too late to change
2848 * our minds.
2849 *
2850 * This condition will only trigger if the task entered
2851 * memcg_charge_kmem in a sane state, but was OOM-killed during
2852 * __mem_cgroup_try_charge() above. Tasks that were already
2853 * dying when the allocation triggers should have been already
2854 * directed to the root cgroup in memcontrol.h
2855 */
2856 res_counter_charge_nofail(&memcg->res, size, &fail_res);
2857 if (do_swap_account)
2858 res_counter_charge_nofail(&memcg->memsw, size,
2859 &fail_res);
2860 ret = 0;
2861 } else if (ret)
2862 res_counter_uncharge(&memcg->kmem, size);
2863
2864 return ret;
2865}
2866
2867static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
2868{
2869 res_counter_uncharge(&memcg->res, size);
2870 if (do_swap_account)
2871 res_counter_uncharge(&memcg->memsw, size);
2872
2873 /* Not down to 0 */
2874 if (res_counter_uncharge(&memcg->kmem, size))
2875 return;
2876
2877 if (memcg_kmem_test_and_clear_dead(memcg))
2878 mem_cgroup_put(memcg);
2879}
2880
2881void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
2882{
2883 if (!memcg)
2884 return;
2885
2886 mutex_lock(&memcg->slab_caches_mutex);
2887 list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
2888 mutex_unlock(&memcg->slab_caches_mutex);
2889}
2890
2891/*
2892 * helper for acessing a memcg's index. It will be used as an index in the
2893 * child cache array in kmem_cache, and also to derive its name. This function
2894 * will return -1 when this is not a kmem-limited memcg.
2895 */
2896int memcg_cache_id(struct mem_cgroup *memcg)
2897{
2898 return memcg ? memcg->kmemcg_id : -1;
2899}
2900
2901/*
2902 * This ends up being protected by the set_limit mutex, during normal
2903 * operation, because that is its main call site.
2904 *
2905 * But when we create a new cache, we can call this as well if its parent
2906 * is kmem-limited. That will have to hold set_limit_mutex as well.
2907 */
2908int memcg_update_cache_sizes(struct mem_cgroup *memcg)
2909{
2910 int num, ret;
2911
2912 num = ida_simple_get(&kmem_limited_groups,
2913 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2914 if (num < 0)
2915 return num;
2916 /*
2917 * After this point, kmem_accounted (that we test atomically in
2918 * the beginning of this conditional), is no longer 0. This
2919 * guarantees only one process will set the following boolean
2920 * to true. We don't need test_and_set because we're protected
2921 * by the set_limit_mutex anyway.
2922 */
2923 memcg_kmem_set_activated(memcg);
2924
2925 ret = memcg_update_all_caches(num+1);
2926 if (ret) {
2927 ida_simple_remove(&kmem_limited_groups, num);
2928 memcg_kmem_clear_activated(memcg);
2929 return ret;
2930 }
2931
2932 memcg->kmemcg_id = num;
2933 INIT_LIST_HEAD(&memcg->memcg_slab_caches);
2934 mutex_init(&memcg->slab_caches_mutex);
2935 return 0;
2936}
2937
2938static size_t memcg_caches_array_size(int num_groups)
2939{
2940 ssize_t size;
2941 if (num_groups <= 0)
2942 return 0;
2943
2944 size = 2 * num_groups;
2945 if (size < MEMCG_CACHES_MIN_SIZE)
2946 size = MEMCG_CACHES_MIN_SIZE;
2947 else if (size > MEMCG_CACHES_MAX_SIZE)
2948 size = MEMCG_CACHES_MAX_SIZE;
2949
2950 return size;
2951}
2952
2953/*
2954 * We should update the current array size iff all caches updates succeed. This
2955 * can only be done from the slab side. The slab mutex needs to be held when
2956 * calling this.
2957 */
2958void memcg_update_array_size(int num)
2959{
2960 if (num > memcg_limited_groups_array_size)
2961 memcg_limited_groups_array_size = memcg_caches_array_size(num);
2962}
2963
2964int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
2965{
2966 struct memcg_cache_params *cur_params = s->memcg_params;
2967
2968 VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache);
2969
2970 if (num_groups > memcg_limited_groups_array_size) {
2971 int i;
2972 ssize_t size = memcg_caches_array_size(num_groups);
2973
2974 size *= sizeof(void *);
2975 size += sizeof(struct memcg_cache_params);
2976
2977 s->memcg_params = kzalloc(size, GFP_KERNEL);
2978 if (!s->memcg_params) {
2979 s->memcg_params = cur_params;
2980 return -ENOMEM;
2981 }
2982
2983 s->memcg_params->is_root_cache = true;
2984
2985 /*
2986 * There is the chance it will be bigger than
2987 * memcg_limited_groups_array_size, if we failed an allocation
2988 * in a cache, in which case all caches updated before it, will
2989 * have a bigger array.
2990 *
2991 * But if that is the case, the data after
2992 * memcg_limited_groups_array_size is certainly unused
2993 */
2994 for (i = 0; i < memcg_limited_groups_array_size; i++) {
2995 if (!cur_params->memcg_caches[i])
2996 continue;
2997 s->memcg_params->memcg_caches[i] =
2998 cur_params->memcg_caches[i];
2999 }
3000
3001 /*
3002 * Ideally, we would wait until all caches succeed, and only
3003 * then free the old one. But this is not worth the extra
3004 * pointer per-cache we'd have to have for this.
3005 *
3006 * It is not a big deal if some caches are left with a size
3007 * bigger than the others. And all updates will reset this
3008 * anyway.
3009 */
3010 kfree(cur_params);
3011 }
3012 return 0;
3013}
3014
3015int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
3016 struct kmem_cache *root_cache)
3017{
3018 size_t size = sizeof(struct memcg_cache_params);
3019
3020 if (!memcg_kmem_enabled())
3021 return 0;
3022
3023 if (!memcg)
3024 size += memcg_limited_groups_array_size * sizeof(void *);
3025
3026 s->memcg_params = kzalloc(size, GFP_KERNEL);
3027 if (!s->memcg_params)
3028 return -ENOMEM;
3029
3030 if (memcg) {
3031 s->memcg_params->memcg = memcg;
3032 s->memcg_params->root_cache = root_cache;
3033 }
3034 return 0;
3035}
3036
3037void memcg_release_cache(struct kmem_cache *s)
3038{
3039 struct kmem_cache *root;
3040 struct mem_cgroup *memcg;
3041 int id;
3042
3043 /*
3044 * This happens, for instance, when a root cache goes away before we
3045 * add any memcg.
3046 */
3047 if (!s->memcg_params)
3048 return;
3049
3050 if (s->memcg_params->is_root_cache)
3051 goto out;
3052
3053 memcg = s->memcg_params->memcg;
3054 id = memcg_cache_id(memcg);
3055
3056 root = s->memcg_params->root_cache;
3057 root->memcg_params->memcg_caches[id] = NULL;
3058 mem_cgroup_put(memcg);
3059
3060 mutex_lock(&memcg->slab_caches_mutex);
3061 list_del(&s->memcg_params->list);
3062 mutex_unlock(&memcg->slab_caches_mutex);
3063
3064out:
3065 kfree(s->memcg_params);
3066}
3067
3068/*
3069 * During the creation a new cache, we need to disable our accounting mechanism
3070 * altogether. This is true even if we are not creating, but rather just
3071 * enqueing new caches to be created.
3072 *
3073 * This is because that process will trigger allocations; some visible, like
3074 * explicit kmallocs to auxiliary data structures, name strings and internal
3075 * cache structures; some well concealed, like INIT_WORK() that can allocate
3076 * objects during debug.
3077 *
3078 * If any allocation happens during memcg_kmem_get_cache, we will recurse back
3079 * to it. This may not be a bounded recursion: since the first cache creation
3080 * failed to complete (waiting on the allocation), we'll just try to create the
3081 * cache again, failing at the same point.
3082 *
3083 * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
3084 * memcg_kmem_skip_account. So we enclose anything that might allocate memory
3085 * inside the following two functions.
3086 */
3087static inline void memcg_stop_kmem_account(void)
3088{
3089 VM_BUG_ON(!current->mm);
3090 current->memcg_kmem_skip_account++;
3091}
3092
3093static inline void memcg_resume_kmem_account(void)
3094{
3095 VM_BUG_ON(!current->mm);
3096 current->memcg_kmem_skip_account--;
3097}
3098
3099static void kmem_cache_destroy_work_func(struct work_struct *w)
3100{
3101 struct kmem_cache *cachep;
3102 struct memcg_cache_params *p;
3103
3104 p = container_of(w, struct memcg_cache_params, destroy);
3105
3106 cachep = memcg_params_to_cache(p);
3107
3108 /*
3109 * If we get down to 0 after shrink, we could delete right away.
3110 * However, memcg_release_pages() already puts us back in the workqueue
3111 * in that case. If we proceed deleting, we'll get a dangling
3112 * reference, and removing the object from the workqueue in that case
3113 * is unnecessary complication. We are not a fast path.
3114 *
3115 * Note that this case is fundamentally different from racing with
3116 * shrink_slab(): if memcg_cgroup_destroy_cache() is called in
3117 * kmem_cache_shrink, not only we would be reinserting a dead cache
3118 * into the queue, but doing so from inside the worker racing to
3119 * destroy it.
3120 *
3121 * So if we aren't down to zero, we'll just schedule a worker and try
3122 * again
3123 */
3124 if (atomic_read(&cachep->memcg_params->nr_pages) != 0) {
3125 kmem_cache_shrink(cachep);
3126 if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
3127 return;
3128 } else
3129 kmem_cache_destroy(cachep);
3130}
3131
3132void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
3133{
3134 if (!cachep->memcg_params->dead)
3135 return;
3136
3137 /*
3138 * There are many ways in which we can get here.
3139 *
3140 * We can get to a memory-pressure situation while the delayed work is
3141 * still pending to run. The vmscan shrinkers can then release all
3142 * cache memory and get us to destruction. If this is the case, we'll
3143 * be executed twice, which is a bug (the second time will execute over
3144 * bogus data). In this case, cancelling the work should be fine.
3145 *
3146 * But we can also get here from the worker itself, if
3147 * kmem_cache_shrink is enough to shake all the remaining objects and
3148 * get the page count to 0. In this case, we'll deadlock if we try to
3149 * cancel the work (the worker runs with an internal lock held, which
3150 * is the same lock we would hold for cancel_work_sync().)
3151 *
3152 * Since we can't possibly know who got us here, just refrain from
3153 * running if there is already work pending
3154 */
3155 if (work_pending(&cachep->memcg_params->destroy))
3156 return;
3157 /*
3158 * We have to defer the actual destroying to a workqueue, because
3159 * we might currently be in a context that cannot sleep.
3160 */
3161 schedule_work(&cachep->memcg_params->destroy);
3162}
3163
3164static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s)
3165{
3166 char *name;
3167 struct dentry *dentry;
3168
3169 rcu_read_lock();
3170 dentry = rcu_dereference(memcg->css.cgroup->dentry);
3171 rcu_read_unlock();
3172
3173 BUG_ON(dentry == NULL);
3174
3175 name = kasprintf(GFP_KERNEL, "%s(%d:%s)", s->name,
3176 memcg_cache_id(memcg), dentry->d_name.name);
3177
3178 return name;
3179}
3180
3181static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
3182 struct kmem_cache *s)
3183{
3184 char *name;
3185 struct kmem_cache *new;
3186
3187 name = memcg_cache_name(memcg, s);
3188 if (!name)
3189 return NULL;
3190
3191 new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align,
3192 (s->flags & ~SLAB_PANIC), s->ctor, s);
3193
3194 if (new)
3195 new->allocflags |= __GFP_KMEMCG;
3196
3197 kfree(name);
3198 return new;
3199}
3200
3201/*
3202 * This lock protects updaters, not readers. We want readers to be as fast as
3203 * they can, and they will either see NULL or a valid cache value. Our model
3204 * allow them to see NULL, in which case the root memcg will be selected.
3205 *
3206 * We need this lock because multiple allocations to the same cache from a non
3207 * will span more than one worker. Only one of them can create the cache.
3208 */
3209static DEFINE_MUTEX(memcg_cache_mutex);
3210static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
3211 struct kmem_cache *cachep)
3212{
3213 struct kmem_cache *new_cachep;
3214 int idx;
3215
3216 BUG_ON(!memcg_can_account_kmem(memcg));
3217
3218 idx = memcg_cache_id(memcg);
3219
3220 mutex_lock(&memcg_cache_mutex);
3221 new_cachep = cachep->memcg_params->memcg_caches[idx];
3222 if (new_cachep)
3223 goto out;
3224
3225 new_cachep = kmem_cache_dup(memcg, cachep);
3226 if (new_cachep == NULL) {
3227 new_cachep = cachep;
3228 goto out;
3229 }
3230
3231 mem_cgroup_get(memcg);
3232 atomic_set(&new_cachep->memcg_params->nr_pages , 0);
3233
3234 cachep->memcg_params->memcg_caches[idx] = new_cachep;
3235 /*
3236 * the readers won't lock, make sure everybody sees the updated value,
3237 * so they won't put stuff in the queue again for no reason
3238 */
3239 wmb();
3240out:
3241 mutex_unlock(&memcg_cache_mutex);
3242 return new_cachep;
3243}
3244
3245void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
3246{
3247 struct kmem_cache *c;
3248 int i;
3249
3250 if (!s->memcg_params)
3251 return;
3252 if (!s->memcg_params->is_root_cache)
3253 return;
3254
3255 /*
3256 * If the cache is being destroyed, we trust that there is no one else
3257 * requesting objects from it. Even if there are, the sanity checks in
3258 * kmem_cache_destroy should caught this ill-case.
3259 *
3260 * Still, we don't want anyone else freeing memcg_caches under our
3261 * noses, which can happen if a new memcg comes to life. As usual,
3262 * we'll take the set_limit_mutex to protect ourselves against this.
3263 */
3264 mutex_lock(&set_limit_mutex);
3265 for (i = 0; i < memcg_limited_groups_array_size; i++) {
3266 c = s->memcg_params->memcg_caches[i];
3267 if (!c)
3268 continue;
3269
3270 /*
3271 * We will now manually delete the caches, so to avoid races
3272 * we need to cancel all pending destruction workers and
3273 * proceed with destruction ourselves.
3274 *
3275 * kmem_cache_destroy() will call kmem_cache_shrink internally,
3276 * and that could spawn the workers again: it is likely that
3277 * the cache still have active pages until this very moment.
3278 * This would lead us back to mem_cgroup_destroy_cache.
3279 *
3280 * But that will not execute at all if the "dead" flag is not
3281 * set, so flip it down to guarantee we are in control.
3282 */
3283 c->memcg_params->dead = false;
3284 cancel_work_sync(&c->memcg_params->destroy);
3285 kmem_cache_destroy(c);
3286 }
3287 mutex_unlock(&set_limit_mutex);
3288}
3289
3290struct create_work {
3291 struct mem_cgroup *memcg;
3292 struct kmem_cache *cachep;
3293 struct work_struct work;
3294};
3295
3296static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3297{
3298 struct kmem_cache *cachep;
3299 struct memcg_cache_params *params;
3300
3301 if (!memcg_kmem_is_active(memcg))
3302 return;
3303
3304 mutex_lock(&memcg->slab_caches_mutex);
3305 list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
3306 cachep = memcg_params_to_cache(params);
3307 cachep->memcg_params->dead = true;
3308 INIT_WORK(&cachep->memcg_params->destroy,
3309 kmem_cache_destroy_work_func);
3310 schedule_work(&cachep->memcg_params->destroy);
3311 }
3312 mutex_unlock(&memcg->slab_caches_mutex);
3313}
3314
3315static void memcg_create_cache_work_func(struct work_struct *w)
3316{
3317 struct create_work *cw;
3318
3319 cw = container_of(w, struct create_work, work);
3320 memcg_create_kmem_cache(cw->memcg, cw->cachep);
3321 /* Drop the reference gotten when we enqueued. */
3322 css_put(&cw->memcg->css);
3323 kfree(cw);
3324}
3325
3326/*
3327 * Enqueue the creation of a per-memcg kmem_cache.
3328 * Called with rcu_read_lock.
3329 */
3330static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3331 struct kmem_cache *cachep)
3332{
3333 struct create_work *cw;
3334
3335 cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
3336 if (cw == NULL)
3337 return;
3338
3339 /* The corresponding put will be done in the workqueue. */
3340 if (!css_tryget(&memcg->css)) {
3341 kfree(cw);
3342 return;
3343 }
3344
3345 cw->memcg = memcg;
3346 cw->cachep = cachep;
3347
3348 INIT_WORK(&cw->work, memcg_create_cache_work_func);
3349 schedule_work(&cw->work);
3350}
3351
3352static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3353 struct kmem_cache *cachep)
3354{
3355 /*
3356 * We need to stop accounting when we kmalloc, because if the
3357 * corresponding kmalloc cache is not yet created, the first allocation
3358 * in __memcg_create_cache_enqueue will recurse.
3359 *
3360 * However, it is better to enclose the whole function. Depending on
3361 * the debugging options enabled, INIT_WORK(), for instance, can
3362 * trigger an allocation. This too, will make us recurse. Because at
3363 * this point we can't allow ourselves back into memcg_kmem_get_cache,
3364 * the safest choice is to do it like this, wrapping the whole function.
3365 */
3366 memcg_stop_kmem_account();
3367 __memcg_create_cache_enqueue(memcg, cachep);
3368 memcg_resume_kmem_account();
3369}
3370/*
3371 * Return the kmem_cache we're supposed to use for a slab allocation.
3372 * We try to use the current memcg's version of the cache.
3373 *
3374 * If the cache does not exist yet, if we are the first user of it,
3375 * we either create it immediately, if possible, or create it asynchronously
3376 * in a workqueue.
3377 * In the latter case, we will let the current allocation go through with
3378 * the original cache.
3379 *
3380 * Can't be called in interrupt context or from kernel threads.
3381 * This function needs to be called with rcu_read_lock() held.
3382 */
3383struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3384 gfp_t gfp)
3385{
3386 struct mem_cgroup *memcg;
3387 int idx;
3388
3389 VM_BUG_ON(!cachep->memcg_params);
3390 VM_BUG_ON(!cachep->memcg_params->is_root_cache);
3391
3392 if (!current->mm || current->memcg_kmem_skip_account)
3393 return cachep;
3394
3395 rcu_read_lock();
3396 memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
3397 rcu_read_unlock();
3398
3399 if (!memcg_can_account_kmem(memcg))
3400 return cachep;
3401
3402 idx = memcg_cache_id(memcg);
3403
3404 /*
3405 * barrier to mare sure we're always seeing the up to date value. The
3406 * code updating memcg_caches will issue a write barrier to match this.
3407 */
3408 read_barrier_depends();
3409 if (unlikely(cachep->memcg_params->memcg_caches[idx] == NULL)) {
3410 /*
3411 * If we are in a safe context (can wait, and not in interrupt
3412 * context), we could be be predictable and return right away.
3413 * This would guarantee that the allocation being performed
3414 * already belongs in the new cache.
3415 *
3416 * However, there are some clashes that can arrive from locking.
3417 * For instance, because we acquire the slab_mutex while doing
3418 * kmem_cache_dup, this means no further allocation could happen
3419 * with the slab_mutex held.
3420 *
3421 * Also, because cache creation issue get_online_cpus(), this
3422 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
3423 * that ends up reversed during cpu hotplug. (cpuset allocates
3424 * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
3425 * better to defer everything.
3426 */
3427 memcg_create_cache_enqueue(memcg, cachep);
3428 return cachep;
3429 }
3430
3431 return cachep->memcg_params->memcg_caches[idx];
3432}
3433EXPORT_SYMBOL(__memcg_kmem_get_cache);
3434
3435/*
3436 * We need to verify if the allocation against current->mm->owner's memcg is
3437 * possible for the given order. But the page is not allocated yet, so we'll
3438 * need a further commit step to do the final arrangements.
3439 *
3440 * It is possible for the task to switch cgroups in this mean time, so at
3441 * commit time, we can't rely on task conversion any longer. We'll then use
3442 * the handle argument to return to the caller which cgroup we should commit
3443 * against. We could also return the memcg directly and avoid the pointer
3444 * passing, but a boolean return value gives better semantics considering
3445 * the compiled-out case as well.
3446 *
3447 * Returning true means the allocation is possible.
3448 */
3449bool
3450__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
3451{
3452 struct mem_cgroup *memcg;
3453 int ret;
3454
3455 *_memcg = NULL;
3456 memcg = try_get_mem_cgroup_from_mm(current->mm);
3457
3458 /*
3459 * very rare case described in mem_cgroup_from_task. Unfortunately there
3460 * isn't much we can do without complicating this too much, and it would
3461 * be gfp-dependent anyway. Just let it go
3462 */
3463 if (unlikely(!memcg))
3464 return true;
3465
3466 if (!memcg_can_account_kmem(memcg)) {
3467 css_put(&memcg->css);
3468 return true;
3469 }
3470
3471 ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
3472 if (!ret)
3473 *_memcg = memcg;
3474
3475 css_put(&memcg->css);
3476 return (ret == 0);
3477}
3478
3479void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
3480 int order)
3481{
3482 struct page_cgroup *pc;
3483
3484 VM_BUG_ON(mem_cgroup_is_root(memcg));
3485
3486 /* The page allocation failed. Revert */
3487 if (!page) {
3488 memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3489 return;
3490 }
3491
3492 pc = lookup_page_cgroup(page);
3493 lock_page_cgroup(pc);
3494 pc->mem_cgroup = memcg;
3495 SetPageCgroupUsed(pc);
3496 unlock_page_cgroup(pc);
3497}
3498
3499void __memcg_kmem_uncharge_pages(struct page *page, int order)
3500{
3501 struct mem_cgroup *memcg = NULL;
3502 struct page_cgroup *pc;
3503
3504
3505 pc = lookup_page_cgroup(page);
3506 /*
3507 * Fast unlocked return. Theoretically might have changed, have to
3508 * check again after locking.
3509 */
3510 if (!PageCgroupUsed(pc))
3511 return;
3512
3513 lock_page_cgroup(pc);
3514 if (PageCgroupUsed(pc)) {
3515 memcg = pc->mem_cgroup;
3516 ClearPageCgroupUsed(pc);
3517 }
3518 unlock_page_cgroup(pc);
3519
3520 /*
3521 * We trust that only if there is a memcg associated with the page, it
3522 * is a valid allocation
3523 */
3524 if (!memcg)
3525 return;
3526
3527 VM_BUG_ON(mem_cgroup_is_root(memcg));
3528 memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3529}
3530#else
3531static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3532{
3533}
3534#endif /* CONFIG_MEMCG_KMEM */
3535
2627#ifdef CONFIG_TRANSPARENT_HUGEPAGE 3536#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2628 3537
2629#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION) 3538#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
@@ -3486,8 +4395,6 @@ void mem_cgroup_print_bad_page(struct page *page)
3486} 4395}
3487#endif 4396#endif
3488 4397
3489static DEFINE_MUTEX(set_limit_mutex);
3490
3491static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 4398static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3492 unsigned long long val) 4399 unsigned long long val)
3493{ 4400{
@@ -3772,6 +4679,7 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
3772static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg) 4679static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
3773{ 4680{
3774 int node, zid; 4681 int node, zid;
4682 u64 usage;
3775 4683
3776 do { 4684 do {
3777 /* This is for making all *used* pages to be on LRU. */ 4685 /* This is for making all *used* pages to be on LRU. */
@@ -3792,13 +4700,20 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
3792 cond_resched(); 4700 cond_resched();
3793 4701
3794 /* 4702 /*
4703 * Kernel memory may not necessarily be trackable to a specific
4704 * process. So they are not migrated, and therefore we can't
4705 * expect their value to drop to 0 here.
4706 * Having res filled up with kmem only is enough.
4707 *
3795 * This is a safety check because mem_cgroup_force_empty_list 4708 * This is a safety check because mem_cgroup_force_empty_list
3796 * could have raced with mem_cgroup_replace_page_cache callers 4709 * could have raced with mem_cgroup_replace_page_cache callers
3797 * so the lru seemed empty but the page could have been added 4710 * so the lru seemed empty but the page could have been added
3798 * right after the check. RES_USAGE should be safe as we always 4711 * right after the check. RES_USAGE should be safe as we always
3799 * charge before adding to the LRU. 4712 * charge before adding to the LRU.
3800 */ 4713 */
3801 } while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0); 4714 usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
4715 res_counter_read_u64(&memcg->kmem, RES_USAGE);
4716 } while (usage > 0);
3802} 4717}
3803 4718
3804/* 4719/*
@@ -3942,7 +4857,8 @@ static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
3942 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 4857 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3943 char str[64]; 4858 char str[64];
3944 u64 val; 4859 u64 val;
3945 int type, name, len; 4860 int name, len;
4861 enum res_type type;
3946 4862
3947 type = MEMFILE_TYPE(cft->private); 4863 type = MEMFILE_TYPE(cft->private);
3948 name = MEMFILE_ATTR(cft->private); 4864 name = MEMFILE_ATTR(cft->private);
@@ -3963,6 +4879,9 @@ static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
3963 else 4879 else
3964 val = res_counter_read_u64(&memcg->memsw, name); 4880 val = res_counter_read_u64(&memcg->memsw, name);
3965 break; 4881 break;
4882 case _KMEM:
4883 val = res_counter_read_u64(&memcg->kmem, name);
4884 break;
3966 default: 4885 default:
3967 BUG(); 4886 BUG();
3968 } 4887 }
@@ -3970,6 +4889,125 @@ static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
3970 len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val); 4889 len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
3971 return simple_read_from_buffer(buf, nbytes, ppos, str, len); 4890 return simple_read_from_buffer(buf, nbytes, ppos, str, len);
3972} 4891}
4892
4893static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
4894{
4895 int ret = -EINVAL;
4896#ifdef CONFIG_MEMCG_KMEM
4897 bool must_inc_static_branch = false;
4898
4899 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4900 /*
4901 * For simplicity, we won't allow this to be disabled. It also can't
4902 * be changed if the cgroup has children already, or if tasks had
4903 * already joined.
4904 *
4905 * If tasks join before we set the limit, a person looking at
4906 * kmem.usage_in_bytes will have no way to determine when it took
4907 * place, which makes the value quite meaningless.
4908 *
4909 * After it first became limited, changes in the value of the limit are
4910 * of course permitted.
4911 *
4912 * Taking the cgroup_lock is really offensive, but it is so far the only
4913 * way to guarantee that no children will appear. There are plenty of
4914 * other offenders, and they should all go away. Fine grained locking
4915 * is probably the way to go here. When we are fully hierarchical, we
4916 * can also get rid of the use_hierarchy check.
4917 */
4918 cgroup_lock();
4919 mutex_lock(&set_limit_mutex);
4920 if (!memcg->kmem_account_flags && val != RESOURCE_MAX) {
4921 if (cgroup_task_count(cont) || (memcg->use_hierarchy &&
4922 !list_empty(&cont->children))) {
4923 ret = -EBUSY;
4924 goto out;
4925 }
4926 ret = res_counter_set_limit(&memcg->kmem, val);
4927 VM_BUG_ON(ret);
4928
4929 ret = memcg_update_cache_sizes(memcg);
4930 if (ret) {
4931 res_counter_set_limit(&memcg->kmem, RESOURCE_MAX);
4932 goto out;
4933 }
4934 must_inc_static_branch = true;
4935 /*
4936 * kmem charges can outlive the cgroup. In the case of slab
4937 * pages, for instance, a page contain objects from various
4938 * processes, so it is unfeasible to migrate them away. We
4939 * need to reference count the memcg because of that.
4940 */
4941 mem_cgroup_get(memcg);
4942 } else
4943 ret = res_counter_set_limit(&memcg->kmem, val);
4944out:
4945 mutex_unlock(&set_limit_mutex);
4946 cgroup_unlock();
4947
4948 /*
4949 * We are by now familiar with the fact that we can't inc the static
4950 * branch inside cgroup_lock. See disarm functions for details. A
4951 * worker here is overkill, but also wrong: After the limit is set, we
4952 * must start accounting right away. Since this operation can't fail,
4953 * we can safely defer it to here - no rollback will be needed.
4954 *
4955 * The boolean used to control this is also safe, because
4956 * KMEM_ACCOUNTED_ACTIVATED guarantees that only one process will be
4957 * able to set it to true;
4958 */
4959 if (must_inc_static_branch) {
4960 static_key_slow_inc(&memcg_kmem_enabled_key);
4961 /*
4962 * setting the active bit after the inc will guarantee no one
4963 * starts accounting before all call sites are patched
4964 */
4965 memcg_kmem_set_active(memcg);
4966 }
4967
4968#endif
4969 return ret;
4970}
4971
4972static int memcg_propagate_kmem(struct mem_cgroup *memcg)
4973{
4974 int ret = 0;
4975 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4976 if (!parent)
4977 goto out;
4978
4979 memcg->kmem_account_flags = parent->kmem_account_flags;
4980#ifdef CONFIG_MEMCG_KMEM
4981 /*
4982 * When that happen, we need to disable the static branch only on those
4983 * memcgs that enabled it. To achieve this, we would be forced to
4984 * complicate the code by keeping track of which memcgs were the ones
4985 * that actually enabled limits, and which ones got it from its
4986 * parents.
4987 *
4988 * It is a lot simpler just to do static_key_slow_inc() on every child
4989 * that is accounted.
4990 */
4991 if (!memcg_kmem_is_active(memcg))
4992 goto out;
4993
4994 /*
4995 * destroy(), called if we fail, will issue static_key_slow_inc() and
4996 * mem_cgroup_put() if kmem is enabled. We have to either call them
4997 * unconditionally, or clear the KMEM_ACTIVE flag. I personally find
4998 * this more consistent, since it always leads to the same destroy path
4999 */
5000 mem_cgroup_get(memcg);
5001 static_key_slow_inc(&memcg_kmem_enabled_key);
5002
5003 mutex_lock(&set_limit_mutex);
5004 ret = memcg_update_cache_sizes(memcg);
5005 mutex_unlock(&set_limit_mutex);
5006#endif
5007out:
5008 return ret;
5009}
5010
3973/* 5011/*
3974 * The user of this function is... 5012 * The user of this function is...
3975 * RES_LIMIT. 5013 * RES_LIMIT.
@@ -3978,7 +5016,8 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3978 const char *buffer) 5016 const char *buffer)
3979{ 5017{
3980 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 5018 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3981 int type, name; 5019 enum res_type type;
5020 int name;
3982 unsigned long long val; 5021 unsigned long long val;
3983 int ret; 5022 int ret;
3984 5023
@@ -4000,8 +5039,12 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
4000 break; 5039 break;
4001 if (type == _MEM) 5040 if (type == _MEM)
4002 ret = mem_cgroup_resize_limit(memcg, val); 5041 ret = mem_cgroup_resize_limit(memcg, val);
4003 else 5042 else if (type == _MEMSWAP)
4004 ret = mem_cgroup_resize_memsw_limit(memcg, val); 5043 ret = mem_cgroup_resize_memsw_limit(memcg, val);
5044 else if (type == _KMEM)
5045 ret = memcg_update_kmem_limit(cont, val);
5046 else
5047 return -EINVAL;
4005 break; 5048 break;
4006 case RES_SOFT_LIMIT: 5049 case RES_SOFT_LIMIT:
4007 ret = res_counter_memparse_write_strategy(buffer, &val); 5050 ret = res_counter_memparse_write_strategy(buffer, &val);
@@ -4054,7 +5097,8 @@ out:
4054static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) 5097static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
4055{ 5098{
4056 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 5099 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4057 int type, name; 5100 int name;
5101 enum res_type type;
4058 5102
4059 type = MEMFILE_TYPE(event); 5103 type = MEMFILE_TYPE(event);
4060 name = MEMFILE_ATTR(event); 5104 name = MEMFILE_ATTR(event);
@@ -4066,14 +5110,22 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
4066 case RES_MAX_USAGE: 5110 case RES_MAX_USAGE:
4067 if (type == _MEM) 5111 if (type == _MEM)
4068 res_counter_reset_max(&memcg->res); 5112 res_counter_reset_max(&memcg->res);
4069 else 5113 else if (type == _MEMSWAP)
4070 res_counter_reset_max(&memcg->memsw); 5114 res_counter_reset_max(&memcg->memsw);
5115 else if (type == _KMEM)
5116 res_counter_reset_max(&memcg->kmem);
5117 else
5118 return -EINVAL;
4071 break; 5119 break;
4072 case RES_FAILCNT: 5120 case RES_FAILCNT:
4073 if (type == _MEM) 5121 if (type == _MEM)
4074 res_counter_reset_failcnt(&memcg->res); 5122 res_counter_reset_failcnt(&memcg->res);
4075 else 5123 else if (type == _MEMSWAP)
4076 res_counter_reset_failcnt(&memcg->memsw); 5124 res_counter_reset_failcnt(&memcg->memsw);
5125 else if (type == _KMEM)
5126 res_counter_reset_failcnt(&memcg->kmem);
5127 else
5128 return -EINVAL;
4077 break; 5129 break;
4078 } 5130 }
4079 5131
@@ -4390,7 +5442,7 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
4390 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 5442 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4391 struct mem_cgroup_thresholds *thresholds; 5443 struct mem_cgroup_thresholds *thresholds;
4392 struct mem_cgroup_threshold_ary *new; 5444 struct mem_cgroup_threshold_ary *new;
4393 int type = MEMFILE_TYPE(cft->private); 5445 enum res_type type = MEMFILE_TYPE(cft->private);
4394 u64 threshold, usage; 5446 u64 threshold, usage;
4395 int i, size, ret; 5447 int i, size, ret;
4396 5448
@@ -4473,7 +5525,7 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
4473 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 5525 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4474 struct mem_cgroup_thresholds *thresholds; 5526 struct mem_cgroup_thresholds *thresholds;
4475 struct mem_cgroup_threshold_ary *new; 5527 struct mem_cgroup_threshold_ary *new;
4476 int type = MEMFILE_TYPE(cft->private); 5528 enum res_type type = MEMFILE_TYPE(cft->private);
4477 u64 usage; 5529 u64 usage;
4478 int i, j, size; 5530 int i, j, size;
4479 5531
@@ -4551,7 +5603,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
4551{ 5603{
4552 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 5604 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4553 struct mem_cgroup_eventfd_list *event; 5605 struct mem_cgroup_eventfd_list *event;
4554 int type = MEMFILE_TYPE(cft->private); 5606 enum res_type type = MEMFILE_TYPE(cft->private);
4555 5607
4556 BUG_ON(type != _OOM_TYPE); 5608 BUG_ON(type != _OOM_TYPE);
4557 event = kmalloc(sizeof(*event), GFP_KERNEL); 5609 event = kmalloc(sizeof(*event), GFP_KERNEL);
@@ -4576,7 +5628,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
4576{ 5628{
4577 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 5629 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4578 struct mem_cgroup_eventfd_list *ev, *tmp; 5630 struct mem_cgroup_eventfd_list *ev, *tmp;
4579 int type = MEMFILE_TYPE(cft->private); 5631 enum res_type type = MEMFILE_TYPE(cft->private);
4580 5632
4581 BUG_ON(type != _OOM_TYPE); 5633 BUG_ON(type != _OOM_TYPE);
4582 5634
@@ -4635,12 +5687,33 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4635#ifdef CONFIG_MEMCG_KMEM 5687#ifdef CONFIG_MEMCG_KMEM
4636static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 5688static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
4637{ 5689{
5690 int ret;
5691
5692 memcg->kmemcg_id = -1;
5693 ret = memcg_propagate_kmem(memcg);
5694 if (ret)
5695 return ret;
5696
4638 return mem_cgroup_sockets_init(memcg, ss); 5697 return mem_cgroup_sockets_init(memcg, ss);
4639}; 5698};
4640 5699
4641static void kmem_cgroup_destroy(struct mem_cgroup *memcg) 5700static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
4642{ 5701{
4643 mem_cgroup_sockets_destroy(memcg); 5702 mem_cgroup_sockets_destroy(memcg);
5703
5704 memcg_kmem_mark_dead(memcg);
5705
5706 if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
5707 return;
5708
5709 /*
5710 * Charges already down to 0, undo mem_cgroup_get() done in the charge
5711 * path here, being careful not to race with memcg_uncharge_kmem: it is
5712 * possible that the charges went down to 0 between mark_dead and the
5713 * res_counter read, so in that case, we don't need the put
5714 */
5715 if (memcg_kmem_test_and_clear_dead(memcg))
5716 mem_cgroup_put(memcg);
4644} 5717}
4645#else 5718#else
4646static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 5719static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
@@ -4749,6 +5822,37 @@ static struct cftype mem_cgroup_files[] = {
4749 .read = mem_cgroup_read, 5822 .read = mem_cgroup_read,
4750 }, 5823 },
4751#endif 5824#endif
5825#ifdef CONFIG_MEMCG_KMEM
5826 {
5827 .name = "kmem.limit_in_bytes",
5828 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5829 .write_string = mem_cgroup_write,
5830 .read = mem_cgroup_read,
5831 },
5832 {
5833 .name = "kmem.usage_in_bytes",
5834 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5835 .read = mem_cgroup_read,
5836 },
5837 {
5838 .name = "kmem.failcnt",
5839 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5840 .trigger = mem_cgroup_reset,
5841 .read = mem_cgroup_read,
5842 },
5843 {
5844 .name = "kmem.max_usage_in_bytes",
5845 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5846 .trigger = mem_cgroup_reset,
5847 .read = mem_cgroup_read,
5848 },
5849#ifdef CONFIG_SLABINFO
5850 {
5851 .name = "kmem.slabinfo",
5852 .read_seq_string = mem_cgroup_slabinfo_read,
5853 },
5854#endif
5855#endif
4752 { }, /* terminate */ 5856 { }, /* terminate */
4753}; 5857};
4754 5858
@@ -4816,16 +5920,29 @@ out_free:
4816} 5920}
4817 5921
4818/* 5922/*
4819 * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU, 5923 * At destroying mem_cgroup, references from swap_cgroup can remain.
4820 * but in process context. The work_freeing structure is overlaid 5924 * (scanning all at force_empty is too costly...)
4821 * on the rcu_freeing structure, which itself is overlaid on memsw. 5925 *
5926 * Instead of clearing all references at force_empty, we remember
5927 * the number of reference from swap_cgroup and free mem_cgroup when
5928 * it goes down to 0.
5929 *
5930 * Removal of cgroup itself succeeds regardless of refs from swap.
4822 */ 5931 */
4823static void free_work(struct work_struct *work) 5932
5933static void __mem_cgroup_free(struct mem_cgroup *memcg)
4824{ 5934{
4825 struct mem_cgroup *memcg; 5935 int node;
4826 int size = sizeof(struct mem_cgroup); 5936 int size = sizeof(struct mem_cgroup);
4827 5937
4828 memcg = container_of(work, struct mem_cgroup, work_freeing); 5938 mem_cgroup_remove_from_trees(memcg);
5939 free_css_id(&mem_cgroup_subsys, &memcg->css);
5940
5941 for_each_node(node)
5942 free_mem_cgroup_per_zone_info(memcg, node);
5943
5944 free_percpu(memcg->stat);
5945
4829 /* 5946 /*
4830 * We need to make sure that (at least for now), the jump label 5947 * We need to make sure that (at least for now), the jump label
4831 * destruction code runs outside of the cgroup lock. This is because 5948 * destruction code runs outside of the cgroup lock. This is because
@@ -4837,45 +5954,34 @@ static void free_work(struct work_struct *work)
4837 * to move this code around, and make sure it is outside 5954 * to move this code around, and make sure it is outside
4838 * the cgroup_lock. 5955 * the cgroup_lock.
4839 */ 5956 */
4840 disarm_sock_keys(memcg); 5957 disarm_static_keys(memcg);
4841 if (size < PAGE_SIZE) 5958 if (size < PAGE_SIZE)
4842 kfree(memcg); 5959 kfree(memcg);
4843 else 5960 else
4844 vfree(memcg); 5961 vfree(memcg);
4845} 5962}
4846 5963
4847static void free_rcu(struct rcu_head *rcu_head)
4848{
4849 struct mem_cgroup *memcg;
4850
4851 memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
4852 INIT_WORK(&memcg->work_freeing, free_work);
4853 schedule_work(&memcg->work_freeing);
4854}
4855 5964
4856/* 5965/*
4857 * At destroying mem_cgroup, references from swap_cgroup can remain. 5966 * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
4858 * (scanning all at force_empty is too costly...) 5967 * but in process context. The work_freeing structure is overlaid
4859 * 5968 * on the rcu_freeing structure, which itself is overlaid on memsw.
4860 * Instead of clearing all references at force_empty, we remember
4861 * the number of reference from swap_cgroup and free mem_cgroup when
4862 * it goes down to 0.
4863 *
4864 * Removal of cgroup itself succeeds regardless of refs from swap.
4865 */ 5969 */
4866 5970static void free_work(struct work_struct *work)
4867static void __mem_cgroup_free(struct mem_cgroup *memcg)
4868{ 5971{
4869 int node; 5972 struct mem_cgroup *memcg;
4870 5973
4871 mem_cgroup_remove_from_trees(memcg); 5974 memcg = container_of(work, struct mem_cgroup, work_freeing);
4872 free_css_id(&mem_cgroup_subsys, &memcg->css); 5975 __mem_cgroup_free(memcg);
5976}
4873 5977
4874 for_each_node(node) 5978static void free_rcu(struct rcu_head *rcu_head)
4875 free_mem_cgroup_per_zone_info(memcg, node); 5979{
5980 struct mem_cgroup *memcg;
4876 5981
4877 free_percpu(memcg->stat); 5982 memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
4878 call_rcu(&memcg->rcu_freeing, free_rcu); 5983 INIT_WORK(&memcg->work_freeing, free_work);
5984 schedule_work(&memcg->work_freeing);
4879} 5985}
4880 5986
4881static void mem_cgroup_get(struct mem_cgroup *memcg) 5987static void mem_cgroup_get(struct mem_cgroup *memcg)
@@ -4887,7 +5993,7 @@ static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
4887{ 5993{
4888 if (atomic_sub_and_test(count, &memcg->refcnt)) { 5994 if (atomic_sub_and_test(count, &memcg->refcnt)) {
4889 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 5995 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4890 __mem_cgroup_free(memcg); 5996 call_rcu(&memcg->rcu_freeing, free_rcu);
4891 if (parent) 5997 if (parent)
4892 mem_cgroup_put(parent); 5998 mem_cgroup_put(parent);
4893 } 5999 }
@@ -4994,6 +6100,8 @@ mem_cgroup_css_alloc(struct cgroup *cont)
4994 if (parent && parent->use_hierarchy) { 6100 if (parent && parent->use_hierarchy) {
4995 res_counter_init(&memcg->res, &parent->res); 6101 res_counter_init(&memcg->res, &parent->res);
4996 res_counter_init(&memcg->memsw, &parent->memsw); 6102 res_counter_init(&memcg->memsw, &parent->memsw);
6103 res_counter_init(&memcg->kmem, &parent->kmem);
6104
4997 /* 6105 /*
4998 * We increment refcnt of the parent to ensure that we can 6106 * We increment refcnt of the parent to ensure that we can
4999 * safely access it on res_counter_charge/uncharge. 6107 * safely access it on res_counter_charge/uncharge.
@@ -5004,6 +6112,7 @@ mem_cgroup_css_alloc(struct cgroup *cont)
5004 } else { 6112 } else {
5005 res_counter_init(&memcg->res, NULL); 6113 res_counter_init(&memcg->res, NULL);
5006 res_counter_init(&memcg->memsw, NULL); 6114 res_counter_init(&memcg->memsw, NULL);
6115 res_counter_init(&memcg->kmem, NULL);
5007 /* 6116 /*
5008 * Deeper hierachy with use_hierarchy == false doesn't make 6117 * Deeper hierachy with use_hierarchy == false doesn't make
5009 * much sense so let cgroup subsystem know about this 6118 * much sense so let cgroup subsystem know about this
@@ -5043,6 +6152,7 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
5043 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 6152 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
5044 6153
5045 mem_cgroup_reparent_charges(memcg); 6154 mem_cgroup_reparent_charges(memcg);
6155 mem_cgroup_destroy_all_caches(memcg);
5046} 6156}
5047 6157
5048static void mem_cgroup_css_free(struct cgroup *cont) 6158static void mem_cgroup_css_free(struct cgroup *cont)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 962e353aa86f..d04ed87bfacb 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -590,18 +590,21 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
590} 590}
591 591
592#ifdef CONFIG_MOVABLE_NODE 592#ifdef CONFIG_MOVABLE_NODE
593/* when CONFIG_MOVABLE_NODE, we allow online node don't have normal memory */ 593/*
594 * When CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have
595 * normal memory.
596 */
594static bool can_online_high_movable(struct zone *zone) 597static bool can_online_high_movable(struct zone *zone)
595{ 598{
596 return true; 599 return true;
597} 600}
598#else /* #ifdef CONFIG_MOVABLE_NODE */ 601#else /* CONFIG_MOVABLE_NODE */
599/* ensure every online node has NORMAL memory */ 602/* ensure every online node has NORMAL memory */
600static bool can_online_high_movable(struct zone *zone) 603static bool can_online_high_movable(struct zone *zone)
601{ 604{
602 return node_state(zone_to_nid(zone), N_NORMAL_MEMORY); 605 return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
603} 606}
604#endif /* #ifdef CONFIG_MOVABLE_NODE */ 607#endif /* CONFIG_MOVABLE_NODE */
605 608
606/* check which state of node_states will be changed when online memory */ 609/* check which state of node_states will be changed when online memory */
607static void node_states_check_changes_online(unsigned long nr_pages, 610static void node_states_check_changes_online(unsigned long nr_pages,
@@ -1112,12 +1115,15 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
1112} 1115}
1113 1116
1114#ifdef CONFIG_MOVABLE_NODE 1117#ifdef CONFIG_MOVABLE_NODE
1115/* when CONFIG_MOVABLE_NODE, we allow online node don't have normal memory */ 1118/*
1119 * When CONFIG_MOVABLE_NODE, we permit offlining of a node which doesn't have
1120 * normal memory.
1121 */
1116static bool can_offline_normal(struct zone *zone, unsigned long nr_pages) 1122static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
1117{ 1123{
1118 return true; 1124 return true;
1119} 1125}
1120#else /* #ifdef CONFIG_MOVABLE_NODE */ 1126#else /* CONFIG_MOVABLE_NODE */
1121/* ensure the node has NORMAL memory if it is still online */ 1127/* ensure the node has NORMAL memory if it is still online */
1122static bool can_offline_normal(struct zone *zone, unsigned long nr_pages) 1128static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
1123{ 1129{
@@ -1141,7 +1147,7 @@ static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
1141 */ 1147 */
1142 return present_pages == 0; 1148 return present_pages == 0;
1143} 1149}
1144#endif /* #ifdef CONFIG_MOVABLE_NODE */ 1150#endif /* CONFIG_MOVABLE_NODE */
1145 1151
1146/* check which state of node_states will be changed when offline memory */ 1152/* check which state of node_states will be changed when offline memory */
1147static void node_states_check_changes_offline(unsigned long nr_pages, 1153static void node_states_check_changes_offline(unsigned long nr_pages,
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 3dca970367db..94722a4d6b43 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -114,7 +114,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
114 114
115#ifdef CONFIG_NUMA_BALANCING 115#ifdef CONFIG_NUMA_BALANCING
116static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr, 116static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
117 pmd_t *pmd) 117 pmd_t *pmd)
118{ 118{
119 spin_lock(&mm->page_table_lock); 119 spin_lock(&mm->page_table_lock);
120 set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd)); 120 set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
@@ -122,15 +122,15 @@ static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
122} 122}
123#else 123#else
124static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr, 124static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
125 pmd_t *pmd) 125 pmd_t *pmd)
126{ 126{
127 BUG(); 127 BUG();
128} 128}
129#endif /* CONFIG_NUMA_BALANCING */ 129#endif /* CONFIG_NUMA_BALANCING */
130 130
131static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *pud, 131static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
132 unsigned long addr, unsigned long end, pgprot_t newprot, 132 pud_t *pud, unsigned long addr, unsigned long end,
133 int dirty_accountable, int prot_numa) 133 pgprot_t newprot, int dirty_accountable, int prot_numa)
134{ 134{
135 pmd_t *pmd; 135 pmd_t *pmd;
136 unsigned long next; 136 unsigned long next;
@@ -143,7 +143,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *
143 if (pmd_trans_huge(*pmd)) { 143 if (pmd_trans_huge(*pmd)) {
144 if (next - addr != HPAGE_PMD_SIZE) 144 if (next - addr != HPAGE_PMD_SIZE)
145 split_huge_page_pmd(vma, addr, pmd); 145 split_huge_page_pmd(vma, addr, pmd);
146 else if (change_huge_pmd(vma, pmd, addr, newprot, prot_numa)) { 146 else if (change_huge_pmd(vma, pmd, addr, newprot,
147 prot_numa)) {
147 pages += HPAGE_PMD_NR; 148 pages += HPAGE_PMD_NR;
148 continue; 149 continue;
149 } 150 }
@@ -167,9 +168,9 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *
167 return pages; 168 return pages;
168} 169}
169 170
170static inline unsigned long change_pud_range(struct vm_area_struct *vma, pgd_t *pgd, 171static inline unsigned long change_pud_range(struct vm_area_struct *vma,
171 unsigned long addr, unsigned long end, pgprot_t newprot, 172 pgd_t *pgd, unsigned long addr, unsigned long end,
172 int dirty_accountable, int prot_numa) 173 pgprot_t newprot, int dirty_accountable, int prot_numa)
173{ 174{
174 pud_t *pud; 175 pud_t *pud;
175 unsigned long next; 176 unsigned long next;
@@ -304,7 +305,8 @@ success:
304 dirty_accountable = 1; 305 dirty_accountable = 1;
305 } 306 }
306 307
307 change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable, 0); 308 change_protection(vma, start, end, vma->vm_page_prot,
309 dirty_accountable, 0);
308 310
309 vm_stat_account(mm, oldflags, vma->vm_file, -nrpages); 311 vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
310 vm_stat_account(mm, newflags, vma->vm_file, nrpages); 312 vm_stat_account(mm, newflags, vma->vm_file, nrpages);
@@ -361,8 +363,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
361 error = -EINVAL; 363 error = -EINVAL;
362 if (!(vma->vm_flags & VM_GROWSDOWN)) 364 if (!(vma->vm_flags & VM_GROWSDOWN))
363 goto out; 365 goto out;
364 } 366 } else {
365 else {
366 if (vma->vm_start > start) 367 if (vma->vm_start > start)
367 goto out; 368 goto out;
368 if (unlikely(grows & PROT_GROWSUP)) { 369 if (unlikely(grows & PROT_GROWSUP)) {
@@ -378,9 +379,10 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
378 for (nstart = start ; ; ) { 379 for (nstart = start ; ; ) {
379 unsigned long newflags; 380 unsigned long newflags;
380 381
381 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ 382 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
382 383
383 newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC)); 384 newflags = vm_flags;
385 newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
384 386
385 /* newflags >> 4 shift VM_MAY% in place of VM_% */ 387 /* newflags >> 4 shift VM_MAY% in place of VM_% */
386 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) { 388 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d037c8bc1512..2ad2ad168efe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -371,8 +371,7 @@ static int destroy_compound_page(struct page *page, unsigned long order)
371 int nr_pages = 1 << order; 371 int nr_pages = 1 << order;
372 int bad = 0; 372 int bad = 0;
373 373
374 if (unlikely(compound_order(page) != order) || 374 if (unlikely(compound_order(page) != order)) {
375 unlikely(!PageHead(page))) {
376 bad_page(page); 375 bad_page(page);
377 bad++; 376 bad++;
378 } 377 }
@@ -2613,6 +2612,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2613 int migratetype = allocflags_to_migratetype(gfp_mask); 2612 int migratetype = allocflags_to_migratetype(gfp_mask);
2614 unsigned int cpuset_mems_cookie; 2613 unsigned int cpuset_mems_cookie;
2615 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET; 2614 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
2615 struct mem_cgroup *memcg = NULL;
2616 2616
2617 gfp_mask &= gfp_allowed_mask; 2617 gfp_mask &= gfp_allowed_mask;
2618 2618
@@ -2631,6 +2631,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2631 if (unlikely(!zonelist->_zonerefs->zone)) 2631 if (unlikely(!zonelist->_zonerefs->zone))
2632 return NULL; 2632 return NULL;
2633 2633
2634 /*
2635 * Will only have any effect when __GFP_KMEMCG is set. This is
2636 * verified in the (always inline) callee
2637 */
2638 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
2639 return NULL;
2640
2634retry_cpuset: 2641retry_cpuset:
2635 cpuset_mems_cookie = get_mems_allowed(); 2642 cpuset_mems_cookie = get_mems_allowed();
2636 2643
@@ -2666,6 +2673,8 @@ out:
2666 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) 2673 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2667 goto retry_cpuset; 2674 goto retry_cpuset;
2668 2675
2676 memcg_kmem_commit_charge(page, memcg, order);
2677
2669 return page; 2678 return page;
2670} 2679}
2671EXPORT_SYMBOL(__alloc_pages_nodemask); 2680EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -2718,6 +2727,31 @@ void free_pages(unsigned long addr, unsigned int order)
2718 2727
2719EXPORT_SYMBOL(free_pages); 2728EXPORT_SYMBOL(free_pages);
2720 2729
2730/*
2731 * __free_memcg_kmem_pages and free_memcg_kmem_pages will free
2732 * pages allocated with __GFP_KMEMCG.
2733 *
2734 * Those pages are accounted to a particular memcg, embedded in the
2735 * corresponding page_cgroup. To avoid adding a hit in the allocator to search
2736 * for that information only to find out that it is NULL for users who have no
2737 * interest in that whatsoever, we provide these functions.
2738 *
2739 * The caller knows better which flags it relies on.
2740 */
2741void __free_memcg_kmem_pages(struct page *page, unsigned int order)
2742{
2743 memcg_kmem_uncharge_pages(page, order);
2744 __free_pages(page, order);
2745}
2746
2747void free_memcg_kmem_pages(unsigned long addr, unsigned int order)
2748{
2749 if (addr != 0) {
2750 VM_BUG_ON(!virt_addr_valid((void *)addr));
2751 __free_memcg_kmem_pages(virt_to_page((void *)addr), order);
2752 }
2753}
2754
2721static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size) 2755static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
2722{ 2756{
2723 if (addr) { 2757 if (addr) {
diff --git a/mm/slab.c b/mm/slab.c
index 2c3a2e0394db..e7667a3584bc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -87,7 +87,6 @@
87 */ 87 */
88 88
89#include <linux/slab.h> 89#include <linux/slab.h>
90#include "slab.h"
91#include <linux/mm.h> 90#include <linux/mm.h>
92#include <linux/poison.h> 91#include <linux/poison.h>
93#include <linux/swap.h> 92#include <linux/swap.h>
@@ -128,6 +127,8 @@
128 127
129#include "internal.h" 128#include "internal.h"
130 129
130#include "slab.h"
131
131/* 132/*
132 * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON. 133 * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
133 * 0 for faster, smaller code (especially in the critical paths). 134 * 0 for faster, smaller code (especially in the critical paths).
@@ -641,6 +642,26 @@ static void init_node_lock_keys(int q)
641 } 642 }
642} 643}
643 644
645static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
646{
647 struct kmem_list3 *l3;
648 l3 = cachep->nodelists[q];
649 if (!l3)
650 return;
651
652 slab_set_lock_classes(cachep, &on_slab_l3_key,
653 &on_slab_alc_key, q);
654}
655
656static inline void on_slab_lock_classes(struct kmem_cache *cachep)
657{
658 int node;
659
660 VM_BUG_ON(OFF_SLAB(cachep));
661 for_each_node(node)
662 on_slab_lock_classes_node(cachep, node);
663}
664
644static inline void init_lock_keys(void) 665static inline void init_lock_keys(void)
645{ 666{
646 int node; 667 int node;
@@ -657,6 +678,14 @@ static inline void init_lock_keys(void)
657{ 678{
658} 679}
659 680
681static inline void on_slab_lock_classes(struct kmem_cache *cachep)
682{
683}
684
685static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
686{
687}
688
660static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node) 689static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
661{ 690{
662} 691}
@@ -1385,6 +1414,9 @@ static int __cpuinit cpuup_prepare(long cpu)
1385 free_alien_cache(alien); 1414 free_alien_cache(alien);
1386 if (cachep->flags & SLAB_DEBUG_OBJECTS) 1415 if (cachep->flags & SLAB_DEBUG_OBJECTS)
1387 slab_set_debugobj_lock_classes_node(cachep, node); 1416 slab_set_debugobj_lock_classes_node(cachep, node);
1417 else if (!OFF_SLAB(cachep) &&
1418 !(cachep->flags & SLAB_DESTROY_BY_RCU))
1419 on_slab_lock_classes_node(cachep, node);
1388 } 1420 }
1389 init_node_lock_keys(node); 1421 init_node_lock_keys(node);
1390 1422
@@ -1863,6 +1895,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1863 if (page->pfmemalloc) 1895 if (page->pfmemalloc)
1864 SetPageSlabPfmemalloc(page + i); 1896 SetPageSlabPfmemalloc(page + i);
1865 } 1897 }
1898 memcg_bind_pages(cachep, cachep->gfporder);
1866 1899
1867 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { 1900 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1868 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid); 1901 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
@@ -1899,9 +1932,11 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1899 __ClearPageSlab(page); 1932 __ClearPageSlab(page);
1900 page++; 1933 page++;
1901 } 1934 }
1935
1936 memcg_release_pages(cachep, cachep->gfporder);
1902 if (current->reclaim_state) 1937 if (current->reclaim_state)
1903 current->reclaim_state->reclaimed_slab += nr_freed; 1938 current->reclaim_state->reclaimed_slab += nr_freed;
1904 free_pages((unsigned long)addr, cachep->gfporder); 1939 free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
1905} 1940}
1906 1941
1907static void kmem_rcu_free(struct rcu_head *head) 1942static void kmem_rcu_free(struct rcu_head *head)
@@ -2489,7 +2524,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2489 WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU); 2524 WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
2490 2525
2491 slab_set_debugobj_lock_classes(cachep); 2526 slab_set_debugobj_lock_classes(cachep);
2492 } 2527 } else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU))
2528 on_slab_lock_classes(cachep);
2493 2529
2494 return 0; 2530 return 0;
2495} 2531}
@@ -3453,6 +3489,8 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3453 if (slab_should_failslab(cachep, flags)) 3489 if (slab_should_failslab(cachep, flags))
3454 return NULL; 3490 return NULL;
3455 3491
3492 cachep = memcg_kmem_get_cache(cachep, flags);
3493
3456 cache_alloc_debugcheck_before(cachep, flags); 3494 cache_alloc_debugcheck_before(cachep, flags);
3457 local_irq_save(save_flags); 3495 local_irq_save(save_flags);
3458 3496
@@ -3538,6 +3576,8 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3538 if (slab_should_failslab(cachep, flags)) 3576 if (slab_should_failslab(cachep, flags))
3539 return NULL; 3577 return NULL;
3540 3578
3579 cachep = memcg_kmem_get_cache(cachep, flags);
3580
3541 cache_alloc_debugcheck_before(cachep, flags); 3581 cache_alloc_debugcheck_before(cachep, flags);
3542 local_irq_save(save_flags); 3582 local_irq_save(save_flags);
3543 objp = __do_cache_alloc(cachep, flags); 3583 objp = __do_cache_alloc(cachep, flags);
@@ -3851,6 +3891,9 @@ EXPORT_SYMBOL(__kmalloc);
3851void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3891void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3852{ 3892{
3853 unsigned long flags; 3893 unsigned long flags;
3894 cachep = cache_from_obj(cachep, objp);
3895 if (!cachep)
3896 return;
3854 3897
3855 local_irq_save(flags); 3898 local_irq_save(flags);
3856 debug_check_no_locks_freed(objp, cachep->object_size); 3899 debug_check_no_locks_freed(objp, cachep->object_size);
@@ -3998,7 +4041,7 @@ static void do_ccupdate_local(void *info)
3998} 4041}
3999 4042
4000/* Always called with the slab_mutex held */ 4043/* Always called with the slab_mutex held */
4001static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 4044static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
4002 int batchcount, int shared, gfp_t gfp) 4045 int batchcount, int shared, gfp_t gfp)
4003{ 4046{
4004 struct ccupdate_struct *new; 4047 struct ccupdate_struct *new;
@@ -4041,12 +4084,49 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
4041 return alloc_kmemlist(cachep, gfp); 4084 return alloc_kmemlist(cachep, gfp);
4042} 4085}
4043 4086
4087static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
4088 int batchcount, int shared, gfp_t gfp)
4089{
4090 int ret;
4091 struct kmem_cache *c = NULL;
4092 int i = 0;
4093
4094 ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
4095
4096 if (slab_state < FULL)
4097 return ret;
4098
4099 if ((ret < 0) || !is_root_cache(cachep))
4100 return ret;
4101
4102 VM_BUG_ON(!mutex_is_locked(&slab_mutex));
4103 for_each_memcg_cache_index(i) {
4104 c = cache_from_memcg(cachep, i);
4105 if (c)
4106 /* return value determined by the parent cache only */
4107 __do_tune_cpucache(c, limit, batchcount, shared, gfp);
4108 }
4109
4110 return ret;
4111}
4112
4044/* Called with slab_mutex held always */ 4113/* Called with slab_mutex held always */
4045static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) 4114static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
4046{ 4115{
4047 int err; 4116 int err;
4048 int limit, shared; 4117 int limit = 0;
4118 int shared = 0;
4119 int batchcount = 0;
4120
4121 if (!is_root_cache(cachep)) {
4122 struct kmem_cache *root = memcg_root_cache(cachep);
4123 limit = root->limit;
4124 shared = root->shared;
4125 batchcount = root->batchcount;
4126 }
4049 4127
4128 if (limit && shared && batchcount)
4129 goto skip_setup;
4050 /* 4130 /*
4051 * The head array serves three purposes: 4131 * The head array serves three purposes:
4052 * - create a LIFO ordering, i.e. return objects that are cache-warm 4132 * - create a LIFO ordering, i.e. return objects that are cache-warm
@@ -4088,7 +4168,9 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
4088 if (limit > 32) 4168 if (limit > 32)
4089 limit = 32; 4169 limit = 32;
4090#endif 4170#endif
4091 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp); 4171 batchcount = (limit + 1) / 2;
4172skip_setup:
4173 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
4092 if (err) 4174 if (err)
4093 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", 4175 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
4094 cachep->name, -err); 4176 cachep->name, -err);
diff --git a/mm/slab.h b/mm/slab.h
index 1cb9c9ee0e6f..34a98d642196 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -43,12 +43,15 @@ extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
43extern void create_boot_cache(struct kmem_cache *, const char *name, 43extern void create_boot_cache(struct kmem_cache *, const char *name,
44 size_t size, unsigned long flags); 44 size_t size, unsigned long flags);
45 45
46struct mem_cgroup;
46#ifdef CONFIG_SLUB 47#ifdef CONFIG_SLUB
47struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, 48struct kmem_cache *
48 size_t align, unsigned long flags, void (*ctor)(void *)); 49__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
50 size_t align, unsigned long flags, void (*ctor)(void *));
49#else 51#else
50static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, 52static inline struct kmem_cache *
51 size_t align, unsigned long flags, void (*ctor)(void *)) 53__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
54 size_t align, unsigned long flags, void (*ctor)(void *))
52{ return NULL; } 55{ return NULL; }
53#endif 56#endif
54 57
@@ -100,4 +103,130 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
100void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s); 103void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
101ssize_t slabinfo_write(struct file *file, const char __user *buffer, 104ssize_t slabinfo_write(struct file *file, const char __user *buffer,
102 size_t count, loff_t *ppos); 105 size_t count, loff_t *ppos);
106
107#ifdef CONFIG_MEMCG_KMEM
108static inline bool is_root_cache(struct kmem_cache *s)
109{
110 return !s->memcg_params || s->memcg_params->is_root_cache;
111}
112
113static inline bool cache_match_memcg(struct kmem_cache *cachep,
114 struct mem_cgroup *memcg)
115{
116 return (is_root_cache(cachep) && !memcg) ||
117 (cachep->memcg_params->memcg == memcg);
118}
119
120static inline void memcg_bind_pages(struct kmem_cache *s, int order)
121{
122 if (!is_root_cache(s))
123 atomic_add(1 << order, &s->memcg_params->nr_pages);
124}
125
126static inline void memcg_release_pages(struct kmem_cache *s, int order)
127{
128 if (is_root_cache(s))
129 return;
130
131 if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
132 mem_cgroup_destroy_cache(s);
133}
134
135static inline bool slab_equal_or_root(struct kmem_cache *s,
136 struct kmem_cache *p)
137{
138 return (p == s) ||
139 (s->memcg_params && (p == s->memcg_params->root_cache));
140}
141
142/*
143 * We use suffixes to the name in memcg because we can't have caches
144 * created in the system with the same name. But when we print them
145 * locally, better refer to them with the base name
146 */
147static inline const char *cache_name(struct kmem_cache *s)
148{
149 if (!is_root_cache(s))
150 return s->memcg_params->root_cache->name;
151 return s->name;
152}
153
154static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
155{
156 return s->memcg_params->memcg_caches[idx];
157}
158
159static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
160{
161 if (is_root_cache(s))
162 return s;
163 return s->memcg_params->root_cache;
164}
165#else
166static inline bool is_root_cache(struct kmem_cache *s)
167{
168 return true;
169}
170
171static inline bool cache_match_memcg(struct kmem_cache *cachep,
172 struct mem_cgroup *memcg)
173{
174 return true;
175}
176
177static inline void memcg_bind_pages(struct kmem_cache *s, int order)
178{
179}
180
181static inline void memcg_release_pages(struct kmem_cache *s, int order)
182{
183}
184
185static inline bool slab_equal_or_root(struct kmem_cache *s,
186 struct kmem_cache *p)
187{
188 return true;
189}
190
191static inline const char *cache_name(struct kmem_cache *s)
192{
193 return s->name;
194}
195
196static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
197{
198 return NULL;
199}
200
201static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
202{
203 return s;
204}
205#endif
206
207static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
208{
209 struct kmem_cache *cachep;
210 struct page *page;
211
212 /*
213 * When kmemcg is not being used, both assignments should return the
214 * same value. but we don't want to pay the assignment price in that
215 * case. If it is not compiled in, the compiler should be smart enough
216 * to not do even the assignment. In that case, slab_equal_or_root
217 * will also be a constant.
218 */
219 if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
220 return s;
221
222 page = virt_to_head_page(x);
223 cachep = page->slab_cache;
224 if (slab_equal_or_root(cachep, s))
225 return cachep;
226
227 pr_err("%s: Wrong slab cache. %s but object is from %s\n",
228 __FUNCTION__, cachep->name, s->name);
229 WARN_ON_ONCE(1);
230 return s;
231}
103#endif 232#endif
diff --git a/mm/slab_common.c b/mm/slab_common.c
index a8e76d79ee65..3f3cd97d3fdf 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -18,6 +18,7 @@
18#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
19#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
20#include <asm/page.h> 20#include <asm/page.h>
21#include <linux/memcontrol.h>
21 22
22#include "slab.h" 23#include "slab.h"
23 24
@@ -27,7 +28,8 @@ DEFINE_MUTEX(slab_mutex);
27struct kmem_cache *kmem_cache; 28struct kmem_cache *kmem_cache;
28 29
29#ifdef CONFIG_DEBUG_VM 30#ifdef CONFIG_DEBUG_VM
30static int kmem_cache_sanity_check(const char *name, size_t size) 31static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
32 size_t size)
31{ 33{
32 struct kmem_cache *s = NULL; 34 struct kmem_cache *s = NULL;
33 35
@@ -53,7 +55,13 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
53 continue; 55 continue;
54 } 56 }
55 57
56 if (!strcmp(s->name, name)) { 58 /*
59 * For simplicity, we won't check this in the list of memcg
60 * caches. We have control over memcg naming, and if there
61 * aren't duplicates in the global list, there won't be any
62 * duplicates in the memcg lists as well.
63 */
64 if (!memcg && !strcmp(s->name, name)) {
57 pr_err("%s (%s): Cache name already exists.\n", 65 pr_err("%s (%s): Cache name already exists.\n",
58 __func__, name); 66 __func__, name);
59 dump_stack(); 67 dump_stack();
@@ -66,12 +74,41 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
66 return 0; 74 return 0;
67} 75}
68#else 76#else
69static inline int kmem_cache_sanity_check(const char *name, size_t size) 77static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg,
78 const char *name, size_t size)
70{ 79{
71 return 0; 80 return 0;
72} 81}
73#endif 82#endif
74 83
84#ifdef CONFIG_MEMCG_KMEM
85int memcg_update_all_caches(int num_memcgs)
86{
87 struct kmem_cache *s;
88 int ret = 0;
89 mutex_lock(&slab_mutex);
90
91 list_for_each_entry(s, &slab_caches, list) {
92 if (!is_root_cache(s))
93 continue;
94
95 ret = memcg_update_cache_size(s, num_memcgs);
96 /*
97 * See comment in memcontrol.c, memcg_update_cache_size:
98 * Instead of freeing the memory, we'll just leave the caches
99 * up to this point in an updated state.
100 */
101 if (ret)
102 goto out;
103 }
104
105 memcg_update_array_size(num_memcgs);
106out:
107 mutex_unlock(&slab_mutex);
108 return ret;
109}
110#endif
111
75/* 112/*
76 * Figure out what the alignment of the objects will be given a set of 113 * Figure out what the alignment of the objects will be given a set of
77 * flags, a user specified alignment and the size of the objects. 114 * flags, a user specified alignment and the size of the objects.
@@ -125,8 +162,10 @@ unsigned long calculate_alignment(unsigned long flags,
125 * as davem. 162 * as davem.
126 */ 163 */
127 164
128struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align, 165struct kmem_cache *
129 unsigned long flags, void (*ctor)(void *)) 166kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
167 size_t align, unsigned long flags, void (*ctor)(void *),
168 struct kmem_cache *parent_cache)
130{ 169{
131 struct kmem_cache *s = NULL; 170 struct kmem_cache *s = NULL;
132 int err = 0; 171 int err = 0;
@@ -134,7 +173,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
134 get_online_cpus(); 173 get_online_cpus();
135 mutex_lock(&slab_mutex); 174 mutex_lock(&slab_mutex);
136 175
137 if (!kmem_cache_sanity_check(name, size) == 0) 176 if (!kmem_cache_sanity_check(memcg, name, size) == 0)
138 goto out_locked; 177 goto out_locked;
139 178
140 /* 179 /*
@@ -145,7 +184,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
145 */ 184 */
146 flags &= CACHE_CREATE_MASK; 185 flags &= CACHE_CREATE_MASK;
147 186
148 s = __kmem_cache_alias(name, size, align, flags, ctor); 187 s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
149 if (s) 188 if (s)
150 goto out_locked; 189 goto out_locked;
151 190
@@ -154,6 +193,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
154 s->object_size = s->size = size; 193 s->object_size = s->size = size;
155 s->align = calculate_alignment(flags, align, size); 194 s->align = calculate_alignment(flags, align, size);
156 s->ctor = ctor; 195 s->ctor = ctor;
196
197 if (memcg_register_cache(memcg, s, parent_cache)) {
198 kmem_cache_free(kmem_cache, s);
199 err = -ENOMEM;
200 goto out_locked;
201 }
202
157 s->name = kstrdup(name, GFP_KERNEL); 203 s->name = kstrdup(name, GFP_KERNEL);
158 if (!s->name) { 204 if (!s->name) {
159 kmem_cache_free(kmem_cache, s); 205 kmem_cache_free(kmem_cache, s);
@@ -163,10 +209,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
163 209
164 err = __kmem_cache_create(s, flags); 210 err = __kmem_cache_create(s, flags);
165 if (!err) { 211 if (!err) {
166
167 s->refcount = 1; 212 s->refcount = 1;
168 list_add(&s->list, &slab_caches); 213 list_add(&s->list, &slab_caches);
169 214 memcg_cache_list_add(memcg, s);
170 } else { 215 } else {
171 kfree(s->name); 216 kfree(s->name);
172 kmem_cache_free(kmem_cache, s); 217 kmem_cache_free(kmem_cache, s);
@@ -194,10 +239,20 @@ out_locked:
194 239
195 return s; 240 return s;
196} 241}
242
243struct kmem_cache *
244kmem_cache_create(const char *name, size_t size, size_t align,
245 unsigned long flags, void (*ctor)(void *))
246{
247 return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL);
248}
197EXPORT_SYMBOL(kmem_cache_create); 249EXPORT_SYMBOL(kmem_cache_create);
198 250
199void kmem_cache_destroy(struct kmem_cache *s) 251void kmem_cache_destroy(struct kmem_cache *s)
200{ 252{
253 /* Destroy all the children caches if we aren't a memcg cache */
254 kmem_cache_destroy_memcg_children(s);
255
201 get_online_cpus(); 256 get_online_cpus();
202 mutex_lock(&slab_mutex); 257 mutex_lock(&slab_mutex);
203 s->refcount--; 258 s->refcount--;
@@ -209,6 +264,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
209 if (s->flags & SLAB_DESTROY_BY_RCU) 264 if (s->flags & SLAB_DESTROY_BY_RCU)
210 rcu_barrier(); 265 rcu_barrier();
211 266
267 memcg_release_cache(s);
212 kfree(s->name); 268 kfree(s->name);
213 kmem_cache_free(kmem_cache, s); 269 kmem_cache_free(kmem_cache, s);
214 } else { 270 } else {
@@ -267,7 +323,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
267 323
268 324
269#ifdef CONFIG_SLABINFO 325#ifdef CONFIG_SLABINFO
270static void print_slabinfo_header(struct seq_file *m) 326void print_slabinfo_header(struct seq_file *m)
271{ 327{
272 /* 328 /*
273 * Output format version, so at least we can change it 329 * Output format version, so at least we can change it
@@ -311,16 +367,43 @@ static void s_stop(struct seq_file *m, void *p)
311 mutex_unlock(&slab_mutex); 367 mutex_unlock(&slab_mutex);
312} 368}
313 369
314static int s_show(struct seq_file *m, void *p) 370static void
371memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
372{
373 struct kmem_cache *c;
374 struct slabinfo sinfo;
375 int i;
376
377 if (!is_root_cache(s))
378 return;
379
380 for_each_memcg_cache_index(i) {
381 c = cache_from_memcg(s, i);
382 if (!c)
383 continue;
384
385 memset(&sinfo, 0, sizeof(sinfo));
386 get_slabinfo(c, &sinfo);
387
388 info->active_slabs += sinfo.active_slabs;
389 info->num_slabs += sinfo.num_slabs;
390 info->shared_avail += sinfo.shared_avail;
391 info->active_objs += sinfo.active_objs;
392 info->num_objs += sinfo.num_objs;
393 }
394}
395
396int cache_show(struct kmem_cache *s, struct seq_file *m)
315{ 397{
316 struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
317 struct slabinfo sinfo; 398 struct slabinfo sinfo;
318 399
319 memset(&sinfo, 0, sizeof(sinfo)); 400 memset(&sinfo, 0, sizeof(sinfo));
320 get_slabinfo(s, &sinfo); 401 get_slabinfo(s, &sinfo);
321 402
403 memcg_accumulate_slabinfo(s, &sinfo);
404
322 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", 405 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
323 s->name, sinfo.active_objs, sinfo.num_objs, s->size, 406 cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
324 sinfo.objects_per_slab, (1 << sinfo.cache_order)); 407 sinfo.objects_per_slab, (1 << sinfo.cache_order));
325 408
326 seq_printf(m, " : tunables %4u %4u %4u", 409 seq_printf(m, " : tunables %4u %4u %4u",
@@ -332,6 +415,15 @@ static int s_show(struct seq_file *m, void *p)
332 return 0; 415 return 0;
333} 416}
334 417
418static int s_show(struct seq_file *m, void *p)
419{
420 struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
421
422 if (!is_root_cache(s))
423 return 0;
424 return cache_show(s, m);
425}
426
335/* 427/*
336 * slabinfo_op - iterator that generates /proc/slabinfo 428 * slabinfo_op - iterator that generates /proc/slabinfo
337 * 429 *
diff --git a/mm/slob.c b/mm/slob.c
index 795bab7d391d..a99fdf7a0907 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -58,7 +58,6 @@
58 58
59#include <linux/kernel.h> 59#include <linux/kernel.h>
60#include <linux/slab.h> 60#include <linux/slab.h>
61#include "slab.h"
62 61
63#include <linux/mm.h> 62#include <linux/mm.h>
64#include <linux/swap.h> /* struct reclaim_state */ 63#include <linux/swap.h> /* struct reclaim_state */
@@ -73,6 +72,7 @@
73 72
74#include <linux/atomic.h> 73#include <linux/atomic.h>
75 74
75#include "slab.h"
76/* 76/*
77 * slob_block has a field 'units', which indicates size of block if +ve, 77 * slob_block has a field 'units', which indicates size of block if +ve,
78 * or offset of next block if -ve (in SLOB_UNITs). 78 * or offset of next block if -ve (in SLOB_UNITs).
diff --git a/mm/slub.c b/mm/slub.c
index 87f9f32bf0cd..ba2ca53f6c3a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -31,6 +31,7 @@
31#include <linux/fault-inject.h> 31#include <linux/fault-inject.h>
32#include <linux/stacktrace.h> 32#include <linux/stacktrace.h>
33#include <linux/prefetch.h> 33#include <linux/prefetch.h>
34#include <linux/memcontrol.h>
34 35
35#include <trace/events/kmem.h> 36#include <trace/events/kmem.h>
36 37
@@ -200,13 +201,14 @@ enum track_item { TRACK_ALLOC, TRACK_FREE };
200static int sysfs_slab_add(struct kmem_cache *); 201static int sysfs_slab_add(struct kmem_cache *);
201static int sysfs_slab_alias(struct kmem_cache *, const char *); 202static int sysfs_slab_alias(struct kmem_cache *, const char *);
202static void sysfs_slab_remove(struct kmem_cache *); 203static void sysfs_slab_remove(struct kmem_cache *);
203 204static void memcg_propagate_slab_attrs(struct kmem_cache *s);
204#else 205#else
205static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 206static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
206static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 207static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
207 { return 0; } 208 { return 0; }
208static inline void sysfs_slab_remove(struct kmem_cache *s) { } 209static inline void sysfs_slab_remove(struct kmem_cache *s) { }
209 210
211static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
210#endif 212#endif
211 213
212static inline void stat(const struct kmem_cache *s, enum stat_item si) 214static inline void stat(const struct kmem_cache *s, enum stat_item si)
@@ -1343,6 +1345,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1343 void *start; 1345 void *start;
1344 void *last; 1346 void *last;
1345 void *p; 1347 void *p;
1348 int order;
1346 1349
1347 BUG_ON(flags & GFP_SLAB_BUG_MASK); 1350 BUG_ON(flags & GFP_SLAB_BUG_MASK);
1348 1351
@@ -1351,7 +1354,9 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1351 if (!page) 1354 if (!page)
1352 goto out; 1355 goto out;
1353 1356
1357 order = compound_order(page);
1354 inc_slabs_node(s, page_to_nid(page), page->objects); 1358 inc_slabs_node(s, page_to_nid(page), page->objects);
1359 memcg_bind_pages(s, order);
1355 page->slab_cache = s; 1360 page->slab_cache = s;
1356 __SetPageSlab(page); 1361 __SetPageSlab(page);
1357 if (page->pfmemalloc) 1362 if (page->pfmemalloc)
@@ -1360,7 +1365,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1360 start = page_address(page); 1365 start = page_address(page);
1361 1366
1362 if (unlikely(s->flags & SLAB_POISON)) 1367 if (unlikely(s->flags & SLAB_POISON))
1363 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page)); 1368 memset(start, POISON_INUSE, PAGE_SIZE << order);
1364 1369
1365 last = start; 1370 last = start;
1366 for_each_object(p, s, start, page->objects) { 1371 for_each_object(p, s, start, page->objects) {
@@ -1401,10 +1406,12 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
1401 1406
1402 __ClearPageSlabPfmemalloc(page); 1407 __ClearPageSlabPfmemalloc(page);
1403 __ClearPageSlab(page); 1408 __ClearPageSlab(page);
1409
1410 memcg_release_pages(s, order);
1404 reset_page_mapcount(page); 1411 reset_page_mapcount(page);
1405 if (current->reclaim_state) 1412 if (current->reclaim_state)
1406 current->reclaim_state->reclaimed_slab += pages; 1413 current->reclaim_state->reclaimed_slab += pages;
1407 __free_pages(page, order); 1414 __free_memcg_kmem_pages(page, order);
1408} 1415}
1409 1416
1410#define need_reserve_slab_rcu \ 1417#define need_reserve_slab_rcu \
@@ -2322,6 +2329,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2322 if (slab_pre_alloc_hook(s, gfpflags)) 2329 if (slab_pre_alloc_hook(s, gfpflags))
2323 return NULL; 2330 return NULL;
2324 2331
2332 s = memcg_kmem_get_cache(s, gfpflags);
2325redo: 2333redo:
2326 2334
2327 /* 2335 /*
@@ -2610,19 +2618,10 @@ redo:
2610 2618
2611void kmem_cache_free(struct kmem_cache *s, void *x) 2619void kmem_cache_free(struct kmem_cache *s, void *x)
2612{ 2620{
2613 struct page *page; 2621 s = cache_from_obj(s, x);
2614 2622 if (!s)
2615 page = virt_to_head_page(x);
2616
2617 if (kmem_cache_debug(s) && page->slab_cache != s) {
2618 pr_err("kmem_cache_free: Wrong slab cache. %s but object"
2619 " is from %s\n", page->slab_cache->name, s->name);
2620 WARN_ON_ONCE(1);
2621 return; 2623 return;
2622 } 2624 slab_free(s, virt_to_head_page(x), x, _RET_IP_);
2623
2624 slab_free(s, page, x, _RET_IP_);
2625
2626 trace_kmem_cache_free(_RET_IP_, x); 2625 trace_kmem_cache_free(_RET_IP_, x);
2627} 2626}
2628EXPORT_SYMBOL(kmem_cache_free); 2627EXPORT_SYMBOL(kmem_cache_free);
@@ -3154,8 +3153,19 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
3154{ 3153{
3155 int rc = kmem_cache_close(s); 3154 int rc = kmem_cache_close(s);
3156 3155
3157 if (!rc) 3156 if (!rc) {
3157 /*
3158 * We do the same lock strategy around sysfs_slab_add, see
3159 * __kmem_cache_create. Because this is pretty much the last
3160 * operation we do and the lock will be released shortly after
3161 * that in slab_common.c, we could just move sysfs_slab_remove
3162 * to a later point in common code. We should do that when we
3163 * have a common sysfs framework for all allocators.
3164 */
3165 mutex_unlock(&slab_mutex);
3158 sysfs_slab_remove(s); 3166 sysfs_slab_remove(s);
3167 mutex_lock(&slab_mutex);
3168 }
3159 3169
3160 return rc; 3170 return rc;
3161} 3171}
@@ -3292,7 +3302,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3292 struct page *page; 3302 struct page *page;
3293 void *ptr = NULL; 3303 void *ptr = NULL;
3294 3304
3295 flags |= __GFP_COMP | __GFP_NOTRACK; 3305 flags |= __GFP_COMP | __GFP_NOTRACK | __GFP_KMEMCG;
3296 page = alloc_pages_node(node, flags, get_order(size)); 3306 page = alloc_pages_node(node, flags, get_order(size));
3297 if (page) 3307 if (page)
3298 ptr = page_address(page); 3308 ptr = page_address(page);
@@ -3398,7 +3408,7 @@ void kfree(const void *x)
3398 if (unlikely(!PageSlab(page))) { 3408 if (unlikely(!PageSlab(page))) {
3399 BUG_ON(!PageCompound(page)); 3409 BUG_ON(!PageCompound(page));
3400 kmemleak_free(x); 3410 kmemleak_free(x);
3401 __free_pages(page, compound_order(page)); 3411 __free_memcg_kmem_pages(page, compound_order(page));
3402 return; 3412 return;
3403 } 3413 }
3404 slab_free(page->slab_cache, page, object, _RET_IP_); 3414 slab_free(page->slab_cache, page, object, _RET_IP_);
@@ -3786,7 +3796,7 @@ static int slab_unmergeable(struct kmem_cache *s)
3786 return 0; 3796 return 0;
3787} 3797}
3788 3798
3789static struct kmem_cache *find_mergeable(size_t size, 3799static struct kmem_cache *find_mergeable(struct mem_cgroup *memcg, size_t size,
3790 size_t align, unsigned long flags, const char *name, 3800 size_t align, unsigned long flags, const char *name,
3791 void (*ctor)(void *)) 3801 void (*ctor)(void *))
3792{ 3802{
@@ -3822,17 +3832,21 @@ static struct kmem_cache *find_mergeable(size_t size,
3822 if (s->size - size >= sizeof(void *)) 3832 if (s->size - size >= sizeof(void *))
3823 continue; 3833 continue;
3824 3834
3835 if (!cache_match_memcg(s, memcg))
3836 continue;
3837
3825 return s; 3838 return s;
3826 } 3839 }
3827 return NULL; 3840 return NULL;
3828} 3841}
3829 3842
3830struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, 3843struct kmem_cache *
3831 size_t align, unsigned long flags, void (*ctor)(void *)) 3844__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
3845 size_t align, unsigned long flags, void (*ctor)(void *))
3832{ 3846{
3833 struct kmem_cache *s; 3847 struct kmem_cache *s;
3834 3848
3835 s = find_mergeable(size, align, flags, name, ctor); 3849 s = find_mergeable(memcg, size, align, flags, name, ctor);
3836 if (s) { 3850 if (s) {
3837 s->refcount++; 3851 s->refcount++;
3838 /* 3852 /*
@@ -3863,6 +3877,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
3863 if (slab_state <= UP) 3877 if (slab_state <= UP)
3864 return 0; 3878 return 0;
3865 3879
3880 memcg_propagate_slab_attrs(s);
3866 mutex_unlock(&slab_mutex); 3881 mutex_unlock(&slab_mutex);
3867 err = sysfs_slab_add(s); 3882 err = sysfs_slab_add(s);
3868 mutex_lock(&slab_mutex); 3883 mutex_lock(&slab_mutex);
@@ -5096,10 +5111,95 @@ static ssize_t slab_attr_store(struct kobject *kobj,
5096 return -EIO; 5111 return -EIO;
5097 5112
5098 err = attribute->store(s, buf, len); 5113 err = attribute->store(s, buf, len);
5114#ifdef CONFIG_MEMCG_KMEM
5115 if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
5116 int i;
5099 5117
5118 mutex_lock(&slab_mutex);
5119 if (s->max_attr_size < len)
5120 s->max_attr_size = len;
5121
5122 /*
5123 * This is a best effort propagation, so this function's return
5124 * value will be determined by the parent cache only. This is
5125 * basically because not all attributes will have a well
5126 * defined semantics for rollbacks - most of the actions will
5127 * have permanent effects.
5128 *
5129 * Returning the error value of any of the children that fail
5130 * is not 100 % defined, in the sense that users seeing the
5131 * error code won't be able to know anything about the state of
5132 * the cache.
5133 *
5134 * Only returning the error code for the parent cache at least
5135 * has well defined semantics. The cache being written to
5136 * directly either failed or succeeded, in which case we loop
5137 * through the descendants with best-effort propagation.
5138 */
5139 for_each_memcg_cache_index(i) {
5140 struct kmem_cache *c = cache_from_memcg(s, i);
5141 if (c)
5142 attribute->store(c, buf, len);
5143 }
5144 mutex_unlock(&slab_mutex);
5145 }
5146#endif
5100 return err; 5147 return err;
5101} 5148}
5102 5149
5150static void memcg_propagate_slab_attrs(struct kmem_cache *s)
5151{
5152#ifdef CONFIG_MEMCG_KMEM
5153 int i;
5154 char *buffer = NULL;
5155
5156 if (!is_root_cache(s))
5157 return;
5158
5159 /*
5160 * This mean this cache had no attribute written. Therefore, no point
5161 * in copying default values around
5162 */
5163 if (!s->max_attr_size)
5164 return;
5165
5166 for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
5167 char mbuf[64];
5168 char *buf;
5169 struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
5170
5171 if (!attr || !attr->store || !attr->show)
5172 continue;
5173
5174 /*
5175 * It is really bad that we have to allocate here, so we will
5176 * do it only as a fallback. If we actually allocate, though,
5177 * we can just use the allocated buffer until the end.
5178 *
5179 * Most of the slub attributes will tend to be very small in
5180 * size, but sysfs allows buffers up to a page, so they can
5181 * theoretically happen.
5182 */
5183 if (buffer)
5184 buf = buffer;
5185 else if (s->max_attr_size < ARRAY_SIZE(mbuf))
5186 buf = mbuf;
5187 else {
5188 buffer = (char *) get_zeroed_page(GFP_KERNEL);
5189 if (WARN_ON(!buffer))
5190 continue;
5191 buf = buffer;
5192 }
5193
5194 attr->show(s->memcg_params->root_cache, buf);
5195 attr->store(s, buf, strlen(buf));
5196 }
5197
5198 if (buffer)
5199 free_page((unsigned long)buffer);
5200#endif
5201}
5202
5103static const struct sysfs_ops slab_sysfs_ops = { 5203static const struct sysfs_ops slab_sysfs_ops = {
5104 .show = slab_attr_show, 5204 .show = slab_attr_show,
5105 .store = slab_attr_store, 5205 .store = slab_attr_store,
@@ -5156,6 +5256,12 @@ static char *create_unique_id(struct kmem_cache *s)
5156 if (p != name + 1) 5256 if (p != name + 1)
5157 *p++ = '-'; 5257 *p++ = '-';
5158 p += sprintf(p, "%07d", s->size); 5258 p += sprintf(p, "%07d", s->size);
5259
5260#ifdef CONFIG_MEMCG_KMEM
5261 if (!is_root_cache(s))
5262 p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
5263#endif
5264
5159 BUG_ON(p > name + ID_STR_LENGTH - 1); 5265 BUG_ON(p > name + ID_STR_LENGTH - 1);
5160 return name; 5266 return name;
5161} 5267}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7f3096137b8a..828530e2794a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1177,7 +1177,11 @@ int isolate_lru_page(struct page *page)
1177} 1177}
1178 1178
1179/* 1179/*
1180 * Are there way too many processes in the direct reclaim path already? 1180 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1181 * then get resheduled. When there are massive number of tasks doing page
1182 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
1183 * the LRU list will go small and be scanned faster than necessary, leading to
1184 * unnecessary swapping, thrashing and OOM.
1181 */ 1185 */
1182static int too_many_isolated(struct zone *zone, int file, 1186static int too_many_isolated(struct zone *zone, int file,
1183 struct scan_control *sc) 1187 struct scan_control *sc)
@@ -1198,6 +1202,14 @@ static int too_many_isolated(struct zone *zone, int file,
1198 isolated = zone_page_state(zone, NR_ISOLATED_ANON); 1202 isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1199 } 1203 }
1200 1204
1205 /*
1206 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1207 * won't get blocked by normal direct-reclaimers, forming a circular
1208 * deadlock.
1209 */
1210 if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS)
1211 inactive >>= 3;
1212
1201 return isolated > inactive; 1213 return isolated > inactive;
1202} 1214}
1203 1215
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index f49da5814bc3..350bf62b2ae3 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -14,49 +14,45 @@ static ssize_t show_type(struct device *cdev,
14 struct device_attribute *attr, char *buf) 14 struct device_attribute *attr, char *buf)
15{ 15{
16 struct atm_dev *adev = to_atm_dev(cdev); 16 struct atm_dev *adev = to_atm_dev(cdev);
17 return sprintf(buf, "%s\n", adev->type); 17
18 return scnprintf(buf, PAGE_SIZE, "%s\n", adev->type);
18} 19}
19 20
20static ssize_t show_address(struct device *cdev, 21static ssize_t show_address(struct device *cdev,
21 struct device_attribute *attr, char *buf) 22 struct device_attribute *attr, char *buf)
22{ 23{
23 char *pos = buf;
24 struct atm_dev *adev = to_atm_dev(cdev); 24 struct atm_dev *adev = to_atm_dev(cdev);
25 int i;
26
27 for (i = 0; i < (ESI_LEN - 1); i++)
28 pos += sprintf(pos, "%02x:", adev->esi[i]);
29 pos += sprintf(pos, "%02x\n", adev->esi[i]);
30 25
31 return pos - buf; 26 return scnprintf(buf, PAGE_SIZE, "%pM\n", adev->esi);
32} 27}
33 28
34static ssize_t show_atmaddress(struct device *cdev, 29static ssize_t show_atmaddress(struct device *cdev,
35 struct device_attribute *attr, char *buf) 30 struct device_attribute *attr, char *buf)
36{ 31{
37 unsigned long flags; 32 unsigned long flags;
38 char *pos = buf;
39 struct atm_dev *adev = to_atm_dev(cdev); 33 struct atm_dev *adev = to_atm_dev(cdev);
40 struct atm_dev_addr *aaddr; 34 struct atm_dev_addr *aaddr;
41 int bin[] = { 1, 2, 10, 6, 1 }, *fmt = bin; 35 int bin[] = { 1, 2, 10, 6, 1 }, *fmt = bin;
42 int i, j; 36 int i, j, count = 0;
43 37
44 spin_lock_irqsave(&adev->lock, flags); 38 spin_lock_irqsave(&adev->lock, flags);
45 list_for_each_entry(aaddr, &adev->local, entry) { 39 list_for_each_entry(aaddr, &adev->local, entry) {
46 for (i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) { 40 for (i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) {
47 if (j == *fmt) { 41 if (j == *fmt) {
48 pos += sprintf(pos, "."); 42 count += scnprintf(buf + count,
43 PAGE_SIZE - count, ".");
49 ++fmt; 44 ++fmt;
50 j = 0; 45 j = 0;
51 } 46 }
52 pos += sprintf(pos, "%02x", 47 count += scnprintf(buf + count,
53 aaddr->addr.sas_addr.prv[i]); 48 PAGE_SIZE - count, "%02x",
49 aaddr->addr.sas_addr.prv[i]);
54 } 50 }
55 pos += sprintf(pos, "\n"); 51 count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
56 } 52 }
57 spin_unlock_irqrestore(&adev->lock, flags); 53 spin_unlock_irqrestore(&adev->lock, flags);
58 54
59 return pos - buf; 55 return count;
60} 56}
61 57
62static ssize_t show_atmindex(struct device *cdev, 58static ssize_t show_atmindex(struct device *cdev,
@@ -64,25 +60,21 @@ static ssize_t show_atmindex(struct device *cdev,
64{ 60{
65 struct atm_dev *adev = to_atm_dev(cdev); 61 struct atm_dev *adev = to_atm_dev(cdev);
66 62
67 return sprintf(buf, "%d\n", adev->number); 63 return scnprintf(buf, PAGE_SIZE, "%d\n", adev->number);
68} 64}
69 65
70static ssize_t show_carrier(struct device *cdev, 66static ssize_t show_carrier(struct device *cdev,
71 struct device_attribute *attr, char *buf) 67 struct device_attribute *attr, char *buf)
72{ 68{
73 char *pos = buf;
74 struct atm_dev *adev = to_atm_dev(cdev); 69 struct atm_dev *adev = to_atm_dev(cdev);
75 70
76 pos += sprintf(pos, "%d\n", 71 return scnprintf(buf, PAGE_SIZE, "%d\n",
77 adev->signal == ATM_PHY_SIG_LOST ? 0 : 1); 72 adev->signal == ATM_PHY_SIG_LOST ? 0 : 1);
78
79 return pos - buf;
80} 73}
81 74
82static ssize_t show_link_rate(struct device *cdev, 75static ssize_t show_link_rate(struct device *cdev,
83 struct device_attribute *attr, char *buf) 76 struct device_attribute *attr, char *buf)
84{ 77{
85 char *pos = buf;
86 struct atm_dev *adev = to_atm_dev(cdev); 78 struct atm_dev *adev = to_atm_dev(cdev);
87 int link_rate; 79 int link_rate;
88 80
@@ -100,9 +92,7 @@ static ssize_t show_link_rate(struct device *cdev,
100 default: 92 default:
101 link_rate = adev->link_rate * 8 * 53; 93 link_rate = adev->link_rate * 8 * 53;
102 } 94 }
103 pos += sprintf(pos, "%d\n", link_rate); 95 return scnprintf(buf, PAGE_SIZE, "%d\n", link_rate);
104
105 return pos - buf;
106} 96}
107 97
108static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); 98static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 6f0a2eebcb27..acc9f4cc18f7 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -83,9 +83,12 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
83 if (port) { 83 if (port) {
84 struct br_mdb_entry e; 84 struct br_mdb_entry e;
85 e.ifindex = port->dev->ifindex; 85 e.ifindex = port->dev->ifindex;
86 e.addr.u.ip4 = p->addr.u.ip4; 86 e.state = p->state;
87 if (p->addr.proto == htons(ETH_P_IP))
88 e.addr.u.ip4 = p->addr.u.ip4;
87#if IS_ENABLED(CONFIG_IPV6) 89#if IS_ENABLED(CONFIG_IPV6)
88 e.addr.u.ip6 = p->addr.u.ip6; 90 if (p->addr.proto == htons(ETH_P_IPV6))
91 e.addr.u.ip6 = p->addr.u.ip6;
89#endif 92#endif
90 e.addr.proto = p->addr.proto; 93 e.addr.proto = p->addr.proto;
91 if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) { 94 if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
@@ -253,6 +256,8 @@ static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
253#endif 256#endif
254 } else 257 } else
255 return false; 258 return false;
259 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
260 return false;
256 261
257 return true; 262 return true;
258} 263}
@@ -310,7 +315,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
310} 315}
311 316
312static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, 317static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
313 struct br_ip *group) 318 struct br_ip *group, unsigned char state)
314{ 319{
315 struct net_bridge_mdb_entry *mp; 320 struct net_bridge_mdb_entry *mp;
316 struct net_bridge_port_group *p; 321 struct net_bridge_port_group *p;
@@ -336,7 +341,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
336 break; 341 break;
337 } 342 }
338 343
339 p = br_multicast_new_port_group(port, group, *pp); 344 p = br_multicast_new_port_group(port, group, *pp, state);
340 if (unlikely(!p)) 345 if (unlikely(!p))
341 return -ENOMEM; 346 return -ENOMEM;
342 rcu_assign_pointer(*pp, p); 347 rcu_assign_pointer(*pp, p);
@@ -373,7 +378,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
373#endif 378#endif
374 379
375 spin_lock_bh(&br->multicast_lock); 380 spin_lock_bh(&br->multicast_lock);
376 ret = br_mdb_add_group(br, p, &ip); 381 ret = br_mdb_add_group(br, p, &ip, entry->state);
377 spin_unlock_bh(&br->multicast_lock); 382 spin_unlock_bh(&br->multicast_lock);
378 return ret; 383 return ret;
379} 384}
@@ -479,3 +484,10 @@ void br_mdb_init(void)
479 rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL); 484 rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
480 rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL); 485 rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
481} 486}
487
488void br_mdb_uninit(void)
489{
490 rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
491 rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
492 rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
493}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 1093c89095d8..5391ca43336a 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -279,7 +279,7 @@ static void br_multicast_port_group_expired(unsigned long data)
279 279
280 spin_lock(&br->multicast_lock); 280 spin_lock(&br->multicast_lock);
281 if (!netif_running(br->dev) || timer_pending(&pg->timer) || 281 if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
282 hlist_unhashed(&pg->mglist)) 282 hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
283 goto out; 283 goto out;
284 284
285 br_multicast_del_pg(br, pg); 285 br_multicast_del_pg(br, pg);
@@ -622,7 +622,8 @@ out:
622struct net_bridge_port_group *br_multicast_new_port_group( 622struct net_bridge_port_group *br_multicast_new_port_group(
623 struct net_bridge_port *port, 623 struct net_bridge_port *port,
624 struct br_ip *group, 624 struct br_ip *group,
625 struct net_bridge_port_group __rcu *next) 625 struct net_bridge_port_group __rcu *next,
626 unsigned char state)
626{ 627{
627 struct net_bridge_port_group *p; 628 struct net_bridge_port_group *p;
628 629
@@ -632,6 +633,7 @@ struct net_bridge_port_group *br_multicast_new_port_group(
632 633
633 p->addr = *group; 634 p->addr = *group;
634 p->port = port; 635 p->port = port;
636 p->state = state;
635 rcu_assign_pointer(p->next, next); 637 rcu_assign_pointer(p->next, next);
636 hlist_add_head(&p->mglist, &port->mglist); 638 hlist_add_head(&p->mglist, &port->mglist);
637 setup_timer(&p->timer, br_multicast_port_group_expired, 639 setup_timer(&p->timer, br_multicast_port_group_expired,
@@ -674,7 +676,7 @@ static int br_multicast_add_group(struct net_bridge *br,
674 break; 676 break;
675 } 677 }
676 678
677 p = br_multicast_new_port_group(port, group, *pp); 679 p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
678 if (unlikely(!p)) 680 if (unlikely(!p))
679 goto err; 681 goto err;
680 rcu_assign_pointer(*pp, p); 682 rcu_assign_pointer(*pp, p);
@@ -1165,7 +1167,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1165 if (max_delay) 1167 if (max_delay)
1166 group = &mld->mld_mca; 1168 group = &mld->mld_mca;
1167 } else if (skb->len >= sizeof(*mld2q)) { 1169 } else if (skb->len >= sizeof(*mld2q)) {
1168 u16 mrc;
1169 if (!pskb_may_pull(skb, sizeof(*mld2q))) { 1170 if (!pskb_may_pull(skb, sizeof(*mld2q))) {
1170 err = -EINVAL; 1171 err = -EINVAL;
1171 goto out; 1172 goto out;
@@ -1173,8 +1174,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1173 mld2q = (struct mld2_query *)icmp6_hdr(skb); 1174 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1174 if (!mld2q->mld2q_nsrcs) 1175 if (!mld2q->mld2q_nsrcs)
1175 group = &mld2q->mld2q_mca; 1176 group = &mld2q->mld2q_mca;
1176 mrc = ntohs(mld2q->mld2q_mrc); 1177 max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
1177 max_delay = mrc ? MLDV2_MRC(mrc) : 1;
1178 } 1178 }
1179 1179
1180 if (!group) 1180 if (!group)
@@ -1633,6 +1633,7 @@ void br_multicast_stop(struct net_bridge *br)
1633 del_timer_sync(&br->multicast_querier_timer); 1633 del_timer_sync(&br->multicast_querier_timer);
1634 del_timer_sync(&br->multicast_query_timer); 1634 del_timer_sync(&br->multicast_query_timer);
1635 1635
1636 br_mdb_uninit();
1636 spin_lock_bh(&br->multicast_lock); 1637 spin_lock_bh(&br->multicast_lock);
1637 mdb = mlock_dereference(br->mdb, br); 1638 mdb = mlock_dereference(br->mdb, br);
1638 if (!mdb) 1639 if (!mdb)
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index dead9dfe865b..97ba0189c6f7 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -305,5 +305,4 @@ int __init br_netlink_init(void)
305void __exit br_netlink_fini(void) 305void __exit br_netlink_fini(void)
306{ 306{
307 rtnl_link_unregister(&br_link_ops); 307 rtnl_link_unregister(&br_link_ops);
308 rtnl_unregister_all(PF_BRIDGE);
309} 308}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index f21a739a6186..8d83be5ffedc 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -83,6 +83,7 @@ struct net_bridge_port_group {
83 struct rcu_head rcu; 83 struct rcu_head rcu;
84 struct timer_list timer; 84 struct timer_list timer;
85 struct br_ip addr; 85 struct br_ip addr;
86 unsigned char state;
86}; 87};
87 88
88struct net_bridge_mdb_entry 89struct net_bridge_mdb_entry
@@ -443,8 +444,10 @@ extern void br_multicast_free_pg(struct rcu_head *head);
443extern struct net_bridge_port_group *br_multicast_new_port_group( 444extern struct net_bridge_port_group *br_multicast_new_port_group(
444 struct net_bridge_port *port, 445 struct net_bridge_port *port,
445 struct br_ip *group, 446 struct br_ip *group,
446 struct net_bridge_port_group *next); 447 struct net_bridge_port_group *next,
448 unsigned char state);
447extern void br_mdb_init(void); 449extern void br_mdb_init(void);
450extern void br_mdb_uninit(void);
448extern void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, 451extern void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
449 struct br_ip *group, int type); 452 struct br_ip *group, int type);
450 453
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 176ecdba4a22..4f9f5eb478f1 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -439,8 +439,8 @@ exit:
439 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 439 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
440 return NULL; 440 return NULL;
441put_and_exit: 441put_and_exit:
442 bh_unlock_sock(newsk); 442 inet_csk_prepare_forced_close(newsk);
443 sock_put(newsk); 443 dccp_done(newsk);
444 goto exit; 444 goto exit;
445} 445}
446 446
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 56840b249f3b..6e05981f271e 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -585,7 +585,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
585 newinet->inet_rcv_saddr = LOOPBACK4_IPV6; 585 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
586 586
587 if (__inet_inherit_port(sk, newsk) < 0) { 587 if (__inet_inherit_port(sk, newsk) < 0) {
588 sock_put(newsk); 588 inet_csk_prepare_forced_close(newsk);
589 dccp_done(newsk);
589 goto out; 590 goto out;
590 } 591 }
591 __inet6_hash(newsk, NULL); 592 __inet6_hash(newsk, NULL);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 2026542d6836..d0670f00d524 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -710,6 +710,22 @@ void inet_csk_destroy_sock(struct sock *sk)
710} 710}
711EXPORT_SYMBOL(inet_csk_destroy_sock); 711EXPORT_SYMBOL(inet_csk_destroy_sock);
712 712
713/* This function allows to force a closure of a socket after the call to
714 * tcp/dccp_create_openreq_child().
715 */
716void inet_csk_prepare_forced_close(struct sock *sk)
717{
718 /* sk_clone_lock locked the socket and set refcnt to 2 */
719 bh_unlock_sock(sk);
720 sock_put(sk);
721
722 /* The below has to be done to allow calling inet_csk_destroy_sock */
723 sock_set_flag(sk, SOCK_DEAD);
724 percpu_counter_inc(sk->sk_prot->orphan_count);
725 inet_sk(sk)->inet_num = 0;
726}
727EXPORT_SYMBOL(inet_csk_prepare_forced_close);
728
713int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) 729int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
714{ 730{
715 struct inet_sock *inet = inet_sk(sk); 731 struct inet_sock *inet = inet_sk(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1ed230716d51..54139fa514e6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1767,10 +1767,8 @@ exit:
1767 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1767 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1768 return NULL; 1768 return NULL;
1769put_and_exit: 1769put_and_exit:
1770 tcp_clear_xmit_timers(newsk); 1770 inet_csk_prepare_forced_close(newsk);
1771 tcp_cleanup_congestion_control(newsk); 1771 tcp_done(newsk);
1772 bh_unlock_sock(newsk);
1773 sock_put(newsk);
1774 goto exit; 1772 goto exit;
1775} 1773}
1776EXPORT_SYMBOL(tcp_v4_syn_recv_sock); 1774EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 2068ac4fbdad..4ea244891b58 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -41,6 +41,6 @@ obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
41obj-$(CONFIG_IPV6_GRE) += ip6_gre.o 41obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
42 42
43obj-y += addrconf_core.o exthdrs_core.o 43obj-y += addrconf_core.o exthdrs_core.o
44obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6_offload) 44obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
45 45
46obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o 46obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6fca01f136ad..408cac4ae00a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -534,8 +534,7 @@ void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
534 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_ATOMIC); 534 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_ATOMIC);
535 return; 535 return;
536errout: 536errout:
537 if (err < 0) 537 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
538 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
539} 538}
540 539
541static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = { 540static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index f2a007b7bde3..6574175795df 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1314,6 +1314,12 @@ out:
1314 1314
1315static void ndisc_redirect_rcv(struct sk_buff *skb) 1315static void ndisc_redirect_rcv(struct sk_buff *skb)
1316{ 1316{
1317 u8 *hdr;
1318 struct ndisc_options ndopts;
1319 struct rd_msg *msg = (struct rd_msg *)skb_transport_header(skb);
1320 u32 ndoptlen = skb->tail - (skb->transport_header +
1321 offsetof(struct rd_msg, opt));
1322
1317#ifdef CONFIG_IPV6_NDISC_NODETYPE 1323#ifdef CONFIG_IPV6_NDISC_NODETYPE
1318 switch (skb->ndisc_nodetype) { 1324 switch (skb->ndisc_nodetype) {
1319 case NDISC_NODETYPE_HOST: 1325 case NDISC_NODETYPE_HOST:
@@ -1330,6 +1336,17 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1330 return; 1336 return;
1331 } 1337 }
1332 1338
1339 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts))
1340 return;
1341
1342 if (!ndopts.nd_opts_rh)
1343 return;
1344
1345 hdr = (u8 *)ndopts.nd_opts_rh;
1346 hdr += 8;
1347 if (!pskb_pull(skb, hdr - skb_transport_header(skb)))
1348 return;
1349
1333 icmpv6_notify(skb, NDISC_REDIRECT, 0, 0); 1350 icmpv6_notify(skb, NDISC_REDIRECT, 0, 0);
1334} 1351}
1335 1352
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6565cf55eb1e..93825dd3a7c0 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1288,7 +1288,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1288#endif 1288#endif
1289 1289
1290 if (__inet_inherit_port(sk, newsk) < 0) { 1290 if (__inet_inherit_port(sk, newsk) < 0) {
1291 sock_put(newsk); 1291 inet_csk_prepare_forced_close(newsk);
1292 tcp_done(newsk);
1292 goto out; 1293 goto out;
1293 } 1294 }
1294 __inet6_hash(newsk, NULL); 1295 __inet6_hash(newsk, NULL);
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
index e748aed290aa..b7c7f815deae 100644
--- a/net/mac802154/ieee802154_dev.c
+++ b/net/mac802154/ieee802154_dev.c
@@ -224,9 +224,9 @@ void ieee802154_free_device(struct ieee802154_dev *hw)
224 224
225 BUG_ON(!list_empty(&priv->slaves)); 225 BUG_ON(!list_empty(&priv->slaves));
226 226
227 wpan_phy_free(priv->phy);
228
229 mutex_destroy(&priv->slaves_mtx); 227 mutex_destroy(&priv->slaves_mtx);
228
229 wpan_phy_free(priv->phy);
230} 230}
231EXPORT_SYMBOL(ieee802154_free_device); 231EXPORT_SYMBOL(ieee802154_free_device);
232 232
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c8a1eb6eca2d..c0353d55d56f 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -669,6 +669,9 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
669 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 669 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
670 int err; 670 int err;
671 671
672 if (addr_len < sizeof(struct sockaddr_nl))
673 return -EINVAL;
674
672 if (nladdr->nl_family != AF_NETLINK) 675 if (nladdr->nl_family != AF_NETLINK)
673 return -EINVAL; 676 return -EINVAL;
674 677
@@ -2059,7 +2062,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
2059 struct sock *s = v; 2062 struct sock *s = v;
2060 struct netlink_sock *nlk = nlk_sk(s); 2063 struct netlink_sock *nlk = nlk_sk(s);
2061 2064
2062 seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n", 2065 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
2063 s, 2066 s,
2064 s->sk_protocol, 2067 s->sk_protocol,
2065 nlk->portid, 2068 nlk->portid,
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index a9edd2e205f4..c26210618e14 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -66,12 +66,36 @@ config SCTP_DBG_OBJCNT
66 'cat /proc/net/sctp/sctp_dbg_objcnt' 66 'cat /proc/net/sctp/sctp_dbg_objcnt'
67 67
68 If unsure, say N 68 If unsure, say N
69choice
70 prompt "Default SCTP cookie HMAC encoding"
71 default SCTP_COOKIE_HMAC_MD5
72 help
73 This option sets the default sctp cookie hmac algorithm
74 when in doubt select 'md5'
75
76config SCTP_DEFAULT_COOKIE_HMAC_MD5
77 bool "Enable optional MD5 hmac cookie generation"
78 help
79 Enable optional MD5 hmac based SCTP cookie generation
80 select SCTP_COOKIE_HMAC_MD5
81
82config SCTP_DEFAULT_COOKIE_HMAC_SHA1
83 bool "Enable optional SHA1 hmac cookie generation"
84 help
85 Enable optional SHA1 hmac based SCTP cookie generation
86 select SCTP_COOKIE_HMAC_SHA1
87
88config SCTP_DEFAULT_COOKIE_HMAC_NONE
89 bool "Use no hmac alg in SCTP cookie generation"
90 help
91 Use no hmac algorithm in SCTP cookie generation
92
93endchoice
69 94
70config SCTP_COOKIE_HMAC_MD5 95config SCTP_COOKIE_HMAC_MD5
71 bool "Enable optional MD5 hmac cookie generation" 96 bool "Enable optional MD5 hmac cookie generation"
72 help 97 help
73 Enable optional MD5 hmac based SCTP cookie generation 98 Enable optional MD5 hmac based SCTP cookie generation
74 default y
75 select CRYPTO_HMAC if SCTP_COOKIE_HMAC_MD5 99 select CRYPTO_HMAC if SCTP_COOKIE_HMAC_MD5
76 select CRYPTO_MD5 if SCTP_COOKIE_HMAC_MD5 100 select CRYPTO_MD5 if SCTP_COOKIE_HMAC_MD5
77 101
@@ -79,7 +103,6 @@ config SCTP_COOKIE_HMAC_SHA1
79 bool "Enable optional SHA1 hmac cookie generation" 103 bool "Enable optional SHA1 hmac cookie generation"
80 help 104 help
81 Enable optional SHA1 hmac based SCTP cookie generation 105 Enable optional SHA1 hmac based SCTP cookie generation
82 default y
83 select CRYPTO_HMAC if SCTP_COOKIE_HMAC_SHA1 106 select CRYPTO_HMAC if SCTP_COOKIE_HMAC_SHA1
84 select CRYPTO_SHA1 if SCTP_COOKIE_HMAC_SHA1 107 select CRYPTO_SHA1 if SCTP_COOKIE_HMAC_SHA1
85 108
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index bc6cd75cc1dc..5f7518de2fd1 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -122,7 +122,8 @@ static const struct file_operations sctpprobe_fops = {
122 .llseek = noop_llseek, 122 .llseek = noop_llseek,
123}; 123};
124 124
125sctp_disposition_t jsctp_sf_eat_sack(const struct sctp_endpoint *ep, 125sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
126 const struct sctp_endpoint *ep,
126 const struct sctp_association *asoc, 127 const struct sctp_association *asoc,
127 const sctp_subtype_t type, 128 const sctp_subtype_t type,
128 void *arg, 129 void *arg,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 2c7785bacf74..f898b1c58bd2 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1191,9 +1191,9 @@ static int __net_init sctp_net_init(struct net *net)
1191 net->sctp.cookie_preserve_enable = 1; 1191 net->sctp.cookie_preserve_enable = 1;
1192 1192
1193 /* Default sctp sockets to use md5 as their hmac alg */ 1193 /* Default sctp sockets to use md5 as their hmac alg */
1194#if defined (CONFIG_CRYPTO_MD5) 1194#if defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5)
1195 net->sctp.sctp_hmac_alg = "md5"; 1195 net->sctp.sctp_hmac_alg = "md5";
1196#elif defined (CONFIG_CRYPTO_SHA1) 1196#elif defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1)
1197 net->sctp.sctp_hmac_alg = "sha1"; 1197 net->sctp.sctp_hmac_alg = "sha1";
1198#else 1198#else
1199 net->sctp.sctp_hmac_alg = NULL; 1199 net->sctp.sctp_hmac_alg = NULL;
diff --git a/scripts/Makefile.modsign b/scripts/Makefile.modsign
new file mode 100644
index 000000000000..abfda626dbad
--- /dev/null
+++ b/scripts/Makefile.modsign
@@ -0,0 +1,32 @@
1# ==========================================================================
2# Signing modules
3# ==========================================================================
4
5PHONY := __modsign
6__modsign:
7
8include scripts/Kbuild.include
9
10__modules := $(sort $(shell grep -h '\.ko' /dev/null $(wildcard $(MODVERDIR)/*.mod)))
11modules := $(patsubst %.o,%.ko,$(wildcard $(__modules:.ko=.o)))
12
13PHONY += $(modules)
14__modsign: $(modules)
15 @:
16
17quiet_cmd_sign_ko = SIGN [M] $(2)/$(notdir $@)
18 cmd_sign_ko = $(mod_sign_cmd) $(2)/$(notdir $@)
19
20# Modules built outside the kernel source tree go into extra by default
21INSTALL_MOD_DIR ?= extra
22ext-mod-dir = $(INSTALL_MOD_DIR)$(subst $(patsubst %/,%,$(KBUILD_EXTMOD)),,$(@D))
23
24modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
25
26$(modules):
27 $(call cmd,sign_ko,$(MODLIB)/$(modinst_dir))
28
29# Declare the contents of the .PHONY variable as phony. We keep that
30# information in a variable se we can use it in if_changed and friends.
31
32.PHONY: $(PHONY)
diff --git a/scripts/coccinelle/api/d_find_alias.cocci b/scripts/coccinelle/api/d_find_alias.cocci
new file mode 100644
index 000000000000..a9694a8d3e5a
--- /dev/null
+++ b/scripts/coccinelle/api/d_find_alias.cocci
@@ -0,0 +1,80 @@
1/// Make sure calls to d_find_alias() have a corresponding call to dput().
2//
3// Keywords: d_find_alias, dput
4//
5// Confidence: Moderate
6// URL: http://coccinelle.lip6.fr/
7// Options: -include_headers
8
9virtual context
10virtual org
11virtual patch
12virtual report
13
14@r exists@
15local idexpression struct dentry *dent;
16expression E, E1;
17statement S1, S2;
18position p1, p2;
19@@
20(
21 if (!(dent@p1 = d_find_alias(...))) S1
22|
23 dent@p1 = d_find_alias(...)
24)
25
26<...when != dput(dent)
27 when != if (...) { <+... dput(dent) ...+> }
28 when != true !dent || ...
29 when != dent = E
30 when != E = dent
31if (!dent || ...) S2
32...>
33(
34 return <+...dent...+>;
35|
36 return @p2 ...;
37|
38 dent@p2 = E1;
39|
40 E1 = dent;
41)
42
43@depends on context@
44local idexpression struct dentry *r.dent;
45position r.p1,r.p2;
46@@
47* dent@p1 = ...
48 ...
49(
50* return@p2 ...;
51|
52* dent@p2
53)
54
55
56@script:python depends on org@
57p1 << r.p1;
58p2 << r.p2;
59@@
60cocci.print_main("Missing call to dput()",p1)
61cocci.print_secs("",p2)
62
63@depends on patch@
64local idexpression struct dentry *r.dent;
65position r.p2;
66@@
67(
68+ dput(dent);
69 return @p2 ...;
70|
71+ dput(dent);
72 dent@p2 = ...;
73)
74
75@script:python depends on report@
76p1 << r.p1;
77p2 << r.p2;
78@@
79msg = "Missing call to dput() at line %s."
80coccilib.report.print_report(p1[0], msg % (p2[0].line))
diff --git a/security/capability.c b/security/capability.c
index b14a30c234b8..0fe5a026aef8 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -395,6 +395,11 @@ static int cap_kernel_module_request(char *kmod_name)
395 return 0; 395 return 0;
396} 396}
397 397
398static int cap_kernel_module_from_file(struct file *file)
399{
400 return 0;
401}
402
398static int cap_task_setpgid(struct task_struct *p, pid_t pgid) 403static int cap_task_setpgid(struct task_struct *p, pid_t pgid)
399{ 404{
400 return 0; 405 return 0;
@@ -967,6 +972,7 @@ void __init security_fixup_ops(struct security_operations *ops)
967 set_to_cap_if_null(ops, kernel_act_as); 972 set_to_cap_if_null(ops, kernel_act_as);
968 set_to_cap_if_null(ops, kernel_create_files_as); 973 set_to_cap_if_null(ops, kernel_create_files_as);
969 set_to_cap_if_null(ops, kernel_module_request); 974 set_to_cap_if_null(ops, kernel_module_request);
975 set_to_cap_if_null(ops, kernel_module_from_file);
970 set_to_cap_if_null(ops, task_fix_setuid); 976 set_to_cap_if_null(ops, task_fix_setuid);
971 set_to_cap_if_null(ops, task_setpgid); 977 set_to_cap_if_null(ops, task_setpgid);
972 set_to_cap_if_null(ops, task_getpgid); 978 set_to_cap_if_null(ops, task_getpgid);
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 6ee8826662cc..3b2adb794f15 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -127,7 +127,7 @@ struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
127struct integrity_iint_cache *integrity_iint_find(struct inode *inode); 127struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
128 128
129/* IMA policy related functions */ 129/* IMA policy related functions */
130enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK, POST_SETATTR }; 130enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK, MODULE_CHECK, POST_SETATTR };
131 131
132int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask, 132int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
133 int flags); 133 int flags);
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index b356884fb3ef..0cea3db21657 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -100,12 +100,12 @@ err_out:
100 * ima_get_action - appraise & measure decision based on policy. 100 * ima_get_action - appraise & measure decision based on policy.
101 * @inode: pointer to inode to measure 101 * @inode: pointer to inode to measure
102 * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE) 102 * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
103 * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP) 103 * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP, MODULE_CHECK)
104 * 104 *
105 * The policy is defined in terms of keypairs: 105 * The policy is defined in terms of keypairs:
106 * subj=, obj=, type=, func=, mask=, fsmagic= 106 * subj=, obj=, type=, func=, mask=, fsmagic=
107 * subj,obj, and type: are LSM specific. 107 * subj,obj, and type: are LSM specific.
108 * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP 108 * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP | MODULE_CHECK
109 * mask: contains the permission mask 109 * mask: contains the permission mask
110 * fsmagic: hex value 110 * fsmagic: hex value
111 * 111 *
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 73c9a268253e..45de18e9a6f2 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -280,6 +280,27 @@ int ima_file_check(struct file *file, int mask)
280} 280}
281EXPORT_SYMBOL_GPL(ima_file_check); 281EXPORT_SYMBOL_GPL(ima_file_check);
282 282
283/**
284 * ima_module_check - based on policy, collect/store/appraise measurement.
285 * @file: pointer to the file to be measured/appraised
286 *
287 * Measure/appraise kernel modules based on policy.
288 *
289 * Always return 0 and audit dentry_open failures.
290 * Return code is based upon measurement appraisal.
291 */
292int ima_module_check(struct file *file)
293{
294 int rc;
295
296 if (!file)
297 rc = INTEGRITY_UNKNOWN;
298 else
299 rc = process_measurement(file, file->f_dentry->d_name.name,
300 MAY_EXEC, MODULE_CHECK);
301 return (ima_appraise & IMA_APPRAISE_ENFORCE) ? rc : 0;
302}
303
283static int __init init_ima(void) 304static int __init init_ima(void)
284{ 305{
285 int error; 306 int error;
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index c7dacd2eab7a..af7d182d5a46 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -80,6 +80,7 @@ static struct ima_rule_entry default_rules[] = {
80 .flags = IMA_FUNC | IMA_MASK}, 80 .flags = IMA_FUNC | IMA_MASK},
81 {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = GLOBAL_ROOT_UID, 81 {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = GLOBAL_ROOT_UID,
82 .flags = IMA_FUNC | IMA_MASK | IMA_UID}, 82 .flags = IMA_FUNC | IMA_MASK | IMA_UID},
83 {.action = MEASURE,.func = MODULE_CHECK, .flags = IMA_FUNC},
83}; 84};
84 85
85static struct ima_rule_entry default_appraise_rules[] = { 86static struct ima_rule_entry default_appraise_rules[] = {
@@ -401,6 +402,8 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
401 /* PATH_CHECK is for backwards compat */ 402 /* PATH_CHECK is for backwards compat */
402 else if (strcmp(args[0].from, "PATH_CHECK") == 0) 403 else if (strcmp(args[0].from, "PATH_CHECK") == 0)
403 entry->func = FILE_CHECK; 404 entry->func = FILE_CHECK;
405 else if (strcmp(args[0].from, "MODULE_CHECK") == 0)
406 entry->func = MODULE_CHECK;
404 else if (strcmp(args[0].from, "FILE_MMAP") == 0) 407 else if (strcmp(args[0].from, "FILE_MMAP") == 0)
405 entry->func = FILE_MMAP; 408 entry->func = FILE_MMAP;
406 else if (strcmp(args[0].from, "BPRM_CHECK") == 0) 409 else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
diff --git a/security/security.c b/security/security.c
index 8dcd4ae10a5f..daa97f4ac9d1 100644
--- a/security/security.c
+++ b/security/security.c
@@ -820,6 +820,16 @@ int security_kernel_module_request(char *kmod_name)
820 return security_ops->kernel_module_request(kmod_name); 820 return security_ops->kernel_module_request(kmod_name);
821} 821}
822 822
823int security_kernel_module_from_file(struct file *file)
824{
825 int ret;
826
827 ret = security_ops->kernel_module_from_file(file);
828 if (ret)
829 return ret;
830 return ima_module_check(file);
831}
832
823int security_task_fix_setuid(struct cred *new, const struct cred *old, 833int security_task_fix_setuid(struct cred *new, const struct cred *old,
824 int flags) 834 int flags)
825{ 835{
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 370a6468b3ba..855e464e92ef 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -69,6 +69,8 @@ static struct nlmsg_perm nlmsg_route_perms[] =
69 { RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, 69 { RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
70 { RTM_NEWNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, 70 { RTM_NEWNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
71 { RTM_GETNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_READ }, 71 { RTM_GETNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_READ },
72 { RTM_NEWMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
73 { RTM_DELMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
72 { RTM_GETMDB, NETLINK_ROUTE_SOCKET__NLMSG_READ }, 74 { RTM_GETMDB, NETLINK_ROUTE_SOCKET__NLMSG_READ },
73}; 75};
74 76